Revert "ath9k: do not insert padding into tx buffers on AR9380+"
drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)


static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

/*********************/
/* Aggregation logic */
/*********************/

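/*
 * Mark a TID as having pending traffic and link it into its access
 * category's schedule list; likewise link the AC into the hardware
 * queue's round-robin list if it is not already scheduled.
 */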
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

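/*
 * Drop all software-queued frames for a TID: frames that were already
 * retried are removed from the block-ack window and completed as
 * failed; anything else is sent out as a normal (non-aggregate) frame.
 */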
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

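/*
 * The block-ack window is tracked in tid->tx_buf, a bitmap indexed by
 * the offset of a frame's sequence number from tid->seq_start. Clearing
 * a completed frame's bit lets the window slide forward past any
 * leading completed frames.
 */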
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

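/*
 * Bump the software retry counter for a frame and, on its first retry,
 * set the IEEE 802.11 retry flag in the frame header.
 */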
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

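/*
 * Clone a transmit buffer so the last subframe of an aggregate can be
 * retried while the original descriptor is still held by the hardware
 * as a "holding" descriptor.
 */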
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}


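/*
 * Handle completion of an aggregate: walk the subframe chain, complete
 * subframes acked by the block-ack bitmap, software-retry the rest (up
 * to ATH_MAX_SW_RETRIES), and requeue pending subframes in order.
 */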
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool clear_filter = false;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
				    !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				clear_filter = true;
				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_tim(sta);

		spin_lock_bh(&txq->axq_lock);
		if (clear_filter)
			tid->ac->clear_ps_filter = true;
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping)
			ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

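/*
 * Compute the aggregate size limit for a TID based on the rate series,
 * capped so the aggregate stays within a 4ms transmit duration and the
 * peer's maximum A-MPDU size.
 */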
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non-enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

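/*
 * Build one aggregate from the TID's software queue: pull frames while
 * they fit the block-ack window, the rate-derived length limit and the
 * subframe limit, adding per-subframe delimiter padding as needed.
 */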
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

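/*
 * Translate the mac80211 rate series into the hardware rate table for
 * this buffer: per-series tries, flags (RTS/CTS, 40 MHz, SGI), chainmask
 * and on-air duration for both MCS and legacy rates.
 */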
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
					is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

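/*
 * Program the hardware descriptors for a buffer chain: common flags and
 * rate info are set up once, then each subframe's descriptor is filled
 * with its buffer address, length, key and aggregate position.
 */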
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

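/*
 * Form and queue aggregates for a TID until the hardware queue is
 * sufficiently filled or the block-ack window closes.
 */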
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

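/*
 * mac80211 ADDBA hook: initialize the block-ack window state for the
 * TID and pause it until the ADDBA exchange completes.
 */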
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

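/*
 * Power-save transitions: on sleep, unschedule all of a node's TIDs and
 * report whether any still hold buffered frames; on wakeup, reschedule
 * them and kick the queues.
 */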
bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered = false;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (!skb_queue_empty(&tid->buf_q))
			buffered = true;

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);
	}

	return buffered;
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

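/*
 * Allocate and initialize a hardware transmit queue for the given queue
 * type/subtype, mapping WME access categories onto hardware QCUs.
 */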
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_err(common, "qnum %u out of range, max %zu!\n",
			axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

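/*
 * Complete, with failed status, every frame on the given descriptor
 * list; stale holding descriptors are simply returned to the free pool.
 */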
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

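/*
 * Abort TX DMA and drain every initialized hardware queue. Returns
 * false if some queue could not be stopped, which signals that a chip
 * reset is required.
 */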
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}

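/*
 * Queue a frame belonging to an aggregation session: buffer it in
 * software when it cannot go out immediately, otherwise push it to the
 * hardware as a standalone A-MPDU subframe.
 */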
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;
	if (!bf)
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);

	if (!bf)
		return;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

2d42efc4
FF
1714static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1715 int framelen)
e8324357
S
1716{
1717 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1718 struct ieee80211_sta *sta = tx_info->control.sta;
1719 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4 1721 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1722 struct ath_node *an = NULL;
2d42efc4 1723 enum ath9k_key_type keytype;
e8324357 1724
2d42efc4 1725 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1726
93ae2dd2
FF
1727 if (sta)
1728 an = (struct ath_node *) sta->drv_priv;
1729
2d42efc4
FF
1730 memset(fi, 0, sizeof(*fi));
1731 if (hw_key)
1732 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1733 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1734 fi->keyix = an->ps_key;
2d42efc4
FF
1735 else
1736 fi->keyix = ATH9K_TXKEYIX_INVALID;
1737 fi->keytype = keytype;
1738 fi->framelen = framelen;
e8324357
S
1739}
1740
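/*
 * On APM-capable hardware transmitting on a 5 GHz channel, rates that
 * need fewer than three spatial streams (rate codes below 0x90, the
 * first three-stream MCS) are reduced from three chains (0x7) to two
 * (0x3).
 */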
ea066d5a
MSS
1741u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1742{
1743 struct ath_hw *ah = sc->sc_ah;
1744 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1745 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1746 (curchan->channelFlags & CHANNEL_5GHZ) &&
1747 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1748 return 0x3;
1749 else
1750 return chainmask;
1751}
1752
44f1d26c
FF
1753/*
1754 * Assign a descriptor (and a sequence number, if necessary),
1755 * and map the buffer for DMA. Frees the skb on error.
1756 */
fa05f87a 1757static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
04caf863 1758 struct ath_txq *txq,
fa05f87a 1759 struct ath_atx_tid *tid,
2d42efc4 1760 struct sk_buff *skb)
f078f209 1761{
82b873af 1762 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1763 struct ath_frame_info *fi = get_frame_info(skb);
fa05f87a 1764 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
82b873af 1765 struct ath_buf *bf;
fa05f87a 1766 u16 seqno;
82b873af
FF
1767
1768 bf = ath_tx_get_buffer(sc);
1769 if (!bf) {
226afe68 1770 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
44f1d26c 1771 goto error;
82b873af 1772 }
e022edbd 1773
528f0c6b 1774 ATH_TXBUF_RESET(bf);
f078f209 1775
fa05f87a
FF
1776 if (tid) {
1777 seqno = tid->seq_next;
1778 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1779 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1780 bf->bf_state.seqno = seqno;
1781 }
1782
f078f209 1783 bf->bf_mpdu = skb;
f8316df1 1784
c1739eb3
BG
1785 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1786 skb->len, DMA_TO_DEVICE);
1787 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1788 bf->bf_mpdu = NULL;
6cf9e995 1789 bf->bf_buf_addr = 0;
3800276a
JP
1790 ath_err(ath9k_hw_common(sc->sc_ah),
1791 "dma_mapping_error() on TX\n");
82b873af 1792 ath_tx_return_buffer(sc, bf);
44f1d26c 1793 goto error;
f8316df1
LR
1794 }
1795
56dc6336 1796 fi->bf = bf;
04caf863
FF
1797
1798 return bf;
44f1d26c
FF
1799
1800error:
1801 dev_kfree_skb_any(skb);
1802 return NULL;
04caf863
FF
1803}
1804
1805/* FIXME: tx power */
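/*
 * Pick the transmit path for one frame under the queue lock: QoS data
 * for a station with aggregation enabled goes through the A-MPDU path,
 * everything else is handed straight to the hardware queue.
 */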
44f1d26c 1806static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
04caf863
FF
1807 struct ath_tx_control *txctl)
1808{
04caf863
FF
1809 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1810 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
248a38d0 1811 struct ath_atx_tid *tid = NULL;
fa05f87a 1812 struct ath_buf *bf;
04caf863 1813 u8 tidno;
f078f209 1814
528f0c6b 1815 spin_lock_bh(&txctl->txq->axq_lock);
61e1b0b0
MSS
1816 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1817 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1818 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1819 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1820 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1821
066dae93 1822 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1823 }
1824
1825 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1826 /*
1827 * Try aggregation if it's a unicast data frame
1828 * and the destination is HT capable.
1829 */
44f1d26c 1830 ath_tx_send_ampdu(sc, tid, skb, txctl);
f078f209 1831 } else {
44f1d26c
FF
1832 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1833 if (!bf)
1834 goto out;
04caf863 1835
82b873af
FF
1836 bf->bf_state.bfs_paprd = txctl->paprd;
1837
9cf04dcc
MSS
1838 if (txctl->paprd)
1839 bf->bf_state.bfs_paprd_timestamp = jiffies;
1840
44f1d26c 1841 ath_tx_send_normal(sc, txctl->txq, tid, skb);
f078f209 1842 }
528f0c6b 1843
fa05f87a 1844out:
528f0c6b 1845 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1846}
1847
f8316df1 1848/* Upon failure caller should free skb */
c52f33d0 1849int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1850 struct ath_tx_control *txctl)
f078f209 1851{
28d16708
FF
1852 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1854 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1855 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1856 struct ath_softc *sc = hw->priv;
84642d6b 1857 struct ath_txq *txq = txctl->txq;
4d91f9f3 1858 int padpos, padsize;
04caf863 1859 int frmlen = skb->len + FCS_LEN;
28d16708 1860 int q;
f078f209 1861
a9927ba3
BG
1862 /* NOTE: sta can be NULL according to net/mac80211.h */
1863 if (sta)
1864 txctl->an = (struct ath_node *)sta->drv_priv;
1865
04caf863
FF
1866 if (info->control.hw_key)
1867 frmlen += info->control.hw_key->icv_len;
1868
f078f209 1869 /*
e8324357
S
1870 * As a temporary workaround, assign seq# here; this will likely need
1871 * to be cleaned up to work better with Beacon transmission and virtual
1872 * BSSes.
f078f209 1873 */
e8324357 1874 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1875 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1876 sc->tx.seq_no += 0x10;
1877 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1878 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1879 }
f078f209 1880
42cecc34
JL
1881 /* Add the padding after the header if this is not already done */
1882 padpos = ath9k_cmn_padpos(hdr->frame_control);
1883 padsize = padpos & 3;
1884 if (padsize && skb->len > padpos) {
1885 if (skb_headroom(skb) < padsize)
1886 return -ENOMEM;
28d16708 1887
42cecc34
JL
1888 skb_push(skb, padsize);
1889 memmove(skb->data, skb->data + padsize, padpos);
f078f209 1890 }
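/*
 * Worked example: a QoS data header is 26 bytes, so padpos = 26 and
 * padsize = 2; skb_push() grows the head by two bytes and the
 * memmove() slides the header forward, leaving the two pad bytes
 * between the 802.11 header and the payload.
 */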
f078f209 1891
f59a59fe
FF
1892 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1893 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1894 !ieee80211_is_data(hdr->frame_control))
1895 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1896
2d42efc4
FF
1897 setup_frame_info(hw, skb, frmlen);
1898
1899 /*
1900 * At this point, the vif, hw_key and sta pointers in the tx control
1901 * info are no longer valid (overwritten by the ath_frame_info data).
1902 */
1903
28d16708
FF
1904 q = skb_get_queue_mapping(skb);
1905 spin_lock_bh(&txq->axq_lock);
1906 if (txq == sc->tx.txq_map[q] &&
1907 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1908 ieee80211_stop_queue(sc->hw, q);
28d16708 1909 txq->stopped = 1;
f078f209 1910 }
28d16708 1911 spin_unlock_bh(&txq->axq_lock);
f078f209 1912
44f1d26c
FF
1913 ath_tx_start_dma(sc, skb, txctl);
1914 return 0;
f078f209
LR
1915}
1916
e8324357
S
1917/*****************/
1918/* TX Completion */
1919/*****************/
528f0c6b 1920
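/*
 * Two entry points lead here: ath_tx_tasklet() walks the legacy
 * per-queue descriptor lists, while ath_tx_edma_tasklet() drains the
 * AR93xx status ring; both funnel completions into
 * ath_tx_process_buffer().
 */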
e8324357 1921static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1922 int tx_flags, struct ath_txq *txq)
528f0c6b 1923{
e8324357
S
1924 struct ieee80211_hw *hw = sc->hw;
1925 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1926 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1927 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1928 int q, padpos, padsize;
528f0c6b 1929
226afe68 1930 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1931
6b2c4032 1932 if (tx_flags & ATH_TX_BAR)
e8324357 1933 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1934
55797b1a 1935 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
1936 /* Frame was ACKed */
1937 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 1938
42cecc34
JL
1939 padpos = ath9k_cmn_padpos(hdr->frame_control);
1940 padsize = padpos & 3;
1941 if (padsize && skb->len > padpos + padsize) {
1942 /*
1943 * Remove MAC header padding before giving the frame back to
1944 * mac80211.
1945 */
1946 memmove(skb->data + padsize, skb->data, padpos);
1947 skb_pull(skb, padsize);
e8324357 1948 }
528f0c6b 1949
1b04b930
S
1950 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1951 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
226afe68
JP
1952 ath_dbg(common, ATH_DBG_PS,
1953 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
1954 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1955 PS_WAIT_FOR_CAB |
1956 PS_WAIT_FOR_PSPOLL_DATA |
1957 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1958 }
1959
7545daf4
FF
1960 q = skb_get_queue_mapping(skb);
1961 if (txq == sc->tx.txq_map[q]) {
1962 spin_lock_bh(&txq->axq_lock);
1963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
92460412 1965
7545daf4
FF
1966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
066dae93 1969 }
7545daf4 1970 spin_unlock_bh(&txq->axq_lock);
97923b14 1971 }
7545daf4
FF
1972
1973 ieee80211_tx_status(hw, skb);
e8324357 1974}
f078f209 1975
e8324357 1976static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1977 struct ath_txq *txq, struct list_head *bf_q,
1978 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1979{
e8324357 1980 struct sk_buff *skb = bf->bf_mpdu;
3afd21e7 1981 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
e8324357 1982 unsigned long flags;
6b2c4032 1983 int tx_flags = 0;
f078f209 1984
e8324357 1985 if (sendbar)
6b2c4032 1986 tx_flags = ATH_TX_BAR;
f078f209 1987
55797b1a 1988 if (!txok)
6b2c4032 1989 tx_flags |= ATH_TX_ERROR;
f078f209 1990
3afd21e7
FF
1991 if (ts->ts_status & ATH9K_TXERR_FILT)
1992 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1993
c1739eb3 1994 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1995 bf->bf_buf_addr = 0;
9f42c2b6
FF
1996
1997 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
1998 if (time_after(jiffies,
1999 bf->bf_state.bfs_paprd_timestamp +
2000 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2001 dev_kfree_skb_any(skb);
78a18172 2002 else
ca369eb4 2003 complete(&sc->paprd_complete);
9f42c2b6 2004 } else {
55797b1a 2005 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2006 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2007 }
6cf9e995
BG
2008 /* At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
2009 * accidentally reference it later.
2010 */
2011 bf->bf_mpdu = NULL;
e8324357
S
2012
2013 /*
2014 * Return the list of ath_buf of this mpdu to free queue
2015 */
2016 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2017 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2018 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2019}
2020
0cdd5c60
FF
2021static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2022 struct ath_tx_status *ts, int nframes, int nbad,
3afd21e7 2023 int txok)
f078f209 2024{
a22be22a 2025 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2027 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2028 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2029 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2030 u8 i, tx_rateindex;
f078f209 2031
95e4acb7 2032 if (txok)
db1a052b 2033 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2034
db1a052b 2035 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2036 WARN_ON(tx_rateindex >= hw->max_rates);
2037
3afd21e7 2038 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
d969847c 2039 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2040
b572d033 2041 BUG_ON(nbad > nframes);
ebd02287 2042
b572d033
FF
2043 tx_info->status.ampdu_len = nframes;
2044 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287
BS
2045 }
2046
db1a052b 2047 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
3afd21e7 2048 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
f0c255a0
FF
2049 /*
2050 * If an underrun error is seen, treat it as an excessive
2051 * retry only if the max frame trigger level has been reached
2052 * (2 KB for single stream, and 4 KB for dual stream).
2053 * Adjust the long retry as if the frame had been tried
2054 * hw->max_rate_tries times, to affect how rate control updates
2055 * PER for the failed rate.
2056 * In case of congestion on the bus, penalizing this type of
2057 * underrun should help the hardware actually transmit new
2058 * frames successfully by eventually preferring slower rates.
2059 * This itself should also alleviate congestion on the bus.
2060 */
3afd21e7
FF
2061 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2062 ATH9K_TX_DELIM_UNDERRUN)) &&
2063 ieee80211_is_data(hdr->frame_control) &&
83860c59 2064 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2065 tx_info->status.rates[tx_rateindex].count =
2066 hw->max_rate_tries;
f078f209 2067 }
8a92e2ee 2068
545750d3 2069 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2070 tx_info->status.rates[i].count = 0;
545750d3
FF
2071 tx_info->status.rates[i].idx = -1;
2072 }
8a92e2ee 2073
78c4653a 2074 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2075}
2076
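/*
 * Complete one (possibly aggregate) transmit unit: adjust the queue
 * depth counters, then report to rate control and the completion
 * path. Entered and exited with axq_lock held; the lock is dropped
 * around the completion handlers, as the sparse annotations note.
 */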
fce041be
FF
2077static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2078 struct ath_tx_status *ts, struct ath_buf *bf,
2079 struct list_head *bf_head)
5479de6e
RM
2080 __releases(txq->axq_lock)
2081 __acquires(txq->axq_lock)
fce041be
FF
2082{
2083 int txok;
2084
2085 txq->axq_depth--;
2086 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2087 txq->axq_tx_inprogress = false;
2088 if (bf_is_ampdu_not_probing(bf))
2089 txq->axq_ampdu_depth--;
2090
2091 spin_unlock_bh(&txq->axq_lock);
2092
2093 if (!bf_isampdu(bf)) {
3afd21e7 2094 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
fce041be
FF
2095 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2096 } else
2097 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2098
2099 spin_lock_bh(&txq->axq_lock);
2100
2101 if (sc->sc_flags & SC_OP_TXAGGR)
2102 ath_txq_schedule(sc, txq);
2103}
2104
e8324357 2105static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2106{
cbe61d8a 2107 struct ath_hw *ah = sc->sc_ah;
c46917bb 2108 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2109 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2110 struct list_head bf_head;
e8324357 2111 struct ath_desc *ds;
29bffa96 2112 struct ath_tx_status ts;
e8324357 2113 int status;
f078f209 2114
226afe68
JP
2115 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2116 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2117 txq->axq_link);
f078f209 2118
fce041be 2119 spin_lock_bh(&txq->axq_lock);
f078f209 2120 for (;;) {
236de514
FF
2121 if (work_pending(&sc->hw_reset_work))
2122 break;
2123
f078f209
LR
2124 if (list_empty(&txq->axq_q)) {
2125 txq->axq_link = NULL;
86271e46 2126 if (sc->sc_flags & SC_OP_TXAGGR)
082f6536 2127 ath_txq_schedule(sc, txq);
f078f209
LR
2128 break;
2129 }
f078f209
LR
2130 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2131
e8324357
S
2132 /*
2133 * There is a race condition where a BH gets scheduled
2134 * after sw writes TxE and before the hw re-loads the last
2135 * descriptor to get the newly chained one.
2136 * Software must keep the last DONE descriptor as a
2137 * holding descriptor - software does so by marking
2138 * it with the STALE flag.
2139 */
2140 bf_held = NULL;
a119cc49 2141 if (bf->bf_stale) {
e8324357 2142 bf_held = bf;
fce041be 2143 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2144 break;
fce041be
FF
2145
2146 bf = list_entry(bf_held->list.next, struct ath_buf,
2147 list);
f078f209
LR
2148 }
2149
2150 lastbf = bf->bf_lastbf;
e8324357 2151 ds = lastbf->bf_desc;
f078f209 2152
29bffa96
FF
2153 memset(&ts, 0, sizeof(ts));
2154 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2155 if (status == -EINPROGRESS)
e8324357 2156 break;
fce041be 2157
2dac4fb9 2158 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2159
e8324357
S
2160 /*
2161 * Remove the ath_bufs of the same transmit unit from txq,
2162 * but leave the last descriptor behind as the holding
2163 * descriptor for hw.
2164 */
a119cc49 2165 lastbf->bf_stale = true;
e8324357 2166 INIT_LIST_HEAD(&bf_head);
e8324357
S
2167 if (!list_is_singular(&lastbf->list))
2168 list_cut_position(&bf_head,
2169 &txq->axq_q, lastbf->list.prev);
f078f209 2170
fce041be 2171 if (bf_held) {
0a8cea84 2172 list_del(&bf_held->list);
0a8cea84 2173 ath_tx_return_buffer(sc, bf_held);
e8324357 2174 }
f078f209 2175
fce041be 2176 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2177 }
fce041be 2178 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2179}
2180
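/*
 * TX watchdog, run every ATH_TX_COMPLETE_POLL_INT ms: a queue that
 * still holds frames on two consecutive passes, with no completion
 * having cleared axq_tx_inprogress in between, is treated as hung and
 * triggers a chip reset.
 */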
305fe47f 2181static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2182{
2183 struct ath_softc *sc = container_of(work, struct ath_softc,
2184 tx_complete_work.work);
2185 struct ath_txq *txq;
2186 int i;
2187 bool needreset = false;
60f2d1d5
BG
2188#ifdef CONFIG_ATH9K_DEBUGFS
2189 sc->tx_complete_poll_work_seen++;
2190#endif
164ace38
SB
2191
2192 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2193 if (ATH_TXQ_SETUP(sc, i)) {
2194 txq = &sc->tx.txq[i];
2195 spin_lock_bh(&txq->axq_lock);
2196 if (txq->axq_depth) {
2197 if (txq->axq_tx_inprogress) {
2198 needreset = true;
2199 spin_unlock_bh(&txq->axq_lock);
2200 break;
2201 } else {
2202 txq->axq_tx_inprogress = true;
2203 }
2204 }
2205 spin_unlock_bh(&txq->axq_lock);
2206 }
2207
2208 if (needreset) {
226afe68
JP
2209 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2210 "tx hung, resetting the chip\n");
236de514 2211 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2212 }
2213
42935eca 2214 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2215 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2216}
2217
2218
f078f209 2219
e8324357 2220void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2221{
e8324357
S
2222 int i;
2223 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2224
e8324357 2225 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2226
e8324357
S
2227 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2228 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2229 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2230 }
2231}
2232
e5003249
VT
2233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
fce041be 2235 struct ath_tx_status ts;
e5003249
VT
2236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
e5003249
VT
2242
2243 for (;;) {
236de514
FF
2244 if (work_pending(&sc->hw_reset_work))
2245 break;
2246
fce041be 2247 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
e5003249
VT
2248 if (status == -EINPROGRESS)
2249 break;
2250 if (status == -EIO) {
226afe68
JP
2251 ath_dbg(common, ATH_DBG_XMIT,
2252 "Error processing tx status\n");
e5003249
VT
2253 break;
2254 }
2255
2256 /* Skip beacon completions */
fce041be 2257 if (ts.qid == sc->beacon.beaconq)
e5003249
VT
2258 continue;
2259
fce041be 2260 txq = &sc->tx.txq[ts.qid];
e5003249
VT
2261
2262 spin_lock_bh(&txq->axq_lock);
fce041be 2263
e5003249
VT
2264 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2265 spin_unlock_bh(&txq->axq_lock);
2266 return;
2267 }
2268
2269 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2270 struct ath_buf, list);
2271 lastbf = bf->bf_lastbf;
2272
2273 INIT_LIST_HEAD(&bf_head);
2274 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2275 &lastbf->list);
e5003249 2276
fce041be
FF
2277 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2278 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
e5003249 2279
fce041be
FF
2280 if (!list_empty(&txq->axq_q)) {
2281 struct list_head bf_q;
60f2d1d5 2282
fce041be
FF
2283 INIT_LIST_HEAD(&bf_q);
2284 txq->axq_link = NULL;
2285 list_splice_tail_init(&txq->axq_q, &bf_q);
2286 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2287 }
2288 }
86271e46 2289
fce041be 2290 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
e5003249
VT
2291 spin_unlock_bh(&txq->axq_lock);
2292 }
2293}
2294
e8324357
S
2295/*****************/
2296/* Init, Cleanup */
2297/*****************/
f078f209 2298
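/*
 * The EDMA TX status ring is one coherent DMA allocation of
 * ATH_TXSTATUS_RING_SIZE entries, each txs_len bytes long as
 * advertised in the hardware capabilities.
 */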
5088c2f1
VT
2299static int ath_txstatus_setup(struct ath_softc *sc, int size)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302 u8 txs_len = sc->sc_ah->caps.txs_len;
2303
2304 dd->dd_desc_len = size * txs_len;
2305 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2306 &dd->dd_desc_paddr, GFP_KERNEL);
2307 if (!dd->dd_desc)
2308 return -ENOMEM;
2309
2310 return 0;
2311}
2312
2313static int ath_tx_edma_init(struct ath_softc *sc)
2314{
2315 int err;
2316
2317 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2318 if (!err)
2319 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2320 sc->txsdma.dd_desc_paddr,
2321 ATH_TXSTATUS_RING_SIZE);
2322
2323 return err;
2324}
2325
2326static void ath_tx_edma_cleanup(struct ath_softc *sc)
2327{
2328 struct ath_descdma *dd = &sc->txsdma;
2329
2330 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2331 dd->dd_desc_paddr);
2332}
2333
e8324357 2334int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2335{
c46917bb 2336 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2337 int error = 0;
f078f209 2338
797fe5cb 2339 spin_lock_init(&sc->tx.txbuflock);
f078f209 2340
797fe5cb 2341 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2342 "tx", nbufs, 1, 1);
797fe5cb 2343 if (error != 0) {
3800276a
JP
2344 ath_err(common,
2345 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2346 goto err;
2347 }
f078f209 2348
797fe5cb 2349 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2350 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2351 if (error != 0) {
3800276a
JP
2352 ath_err(common,
2353 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2354 goto err;
2355 }
f078f209 2356
164ace38
SB
2357 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2358
5088c2f1
VT
2359 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2360 error = ath_tx_edma_init(sc);
2361 if (error)
2362 goto err;
2363 }
2364
797fe5cb 2365err:
e8324357
S
2366 if (error != 0)
2367 ath_tx_cleanup(sc);
f078f209 2368
e8324357 2369 return error;
f078f209
LR
2370}
2371
797fe5cb 2372void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2373{
2374 if (sc->beacon.bdma.dd_desc_len != 0)
2375 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2376
2377 if (sc->tx.txdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2379
2380 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2381 ath_tx_edma_cleanup(sc);
e8324357 2382}
f078f209
LR
2383
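/*
 * Per-station init: every TID starts with an empty block-ack window
 * and is mapped through TID_TO_WME_AC() to one of the WME access
 * categories, each bound to its hardware transmit queue.
 */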
2384void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2385{
c5170163
S
2386 struct ath_atx_tid *tid;
2387 struct ath_atx_ac *ac;
2388 int tidno, acno;
f078f209 2389
8ee5afbc 2390 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2391 tidno < WME_NUM_TID;
2392 tidno++, tid++) {
2393 tid->an = an;
2394 tid->tidno = tidno;
2395 tid->seq_start = tid->seq_next = 0;
2396 tid->baw_size = WME_MAX_BA;
2397 tid->baw_head = tid->baw_tail = 0;
2398 tid->sched = false;
e8324357 2399 tid->paused = false;
a37c2c79 2400 tid->state &= ~AGGR_CLEANUP;
56dc6336 2401 __skb_queue_head_init(&tid->buf_q);
c5170163 2402 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2403 tid->ac = &an->ac[acno];
a37c2c79
S
2404 tid->state &= ~AGGR_ADDBA_COMPLETE;
2405 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2406 }
f078f209 2407
8ee5afbc 2408 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2409 acno < WME_NUM_AC; acno++, ac++) {
2410 ac->sched = false;
066dae93 2411 ac->txq = sc->tx.txq_map[acno];
c5170163 2412 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2413 }
2414}
2415
b5aa9bf9 2416void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2417{
2b40994c
FF
2418 struct ath_atx_ac *ac;
2419 struct ath_atx_tid *tid;
f078f209 2420 struct ath_txq *txq;
066dae93 2421 int tidno;
e8324357 2422
2b40994c
FF
2423 for (tidno = 0, tid = &an->tid[tidno];
2424 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2425
2b40994c 2426 ac = tid->ac;
066dae93 2427 txq = ac->txq;
f078f209 2428
2b40994c
FF
2429 spin_lock_bh(&txq->axq_lock);
2430
2431 if (tid->sched) {
2432 list_del(&tid->list);
2433 tid->sched = false;
2434 }
2435
2436 if (ac->sched) {
2437 list_del(&ac->list);
2438 tid->ac->sched = false;
f078f209 2439 }
2b40994c
FF
2440
2441 ath_tid_drain(sc, txq, tid);
2442 tid->state &= ~AGGR_ADDBA_COMPLETE;
2443 tid->state &= ~AGGR_CLEANUP;
2444
2445 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2446 }
2447}