ath9k: improve PS filter clearing and retry counting for A-MPDU
drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

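/*
 * Illustrative example (editorial, not driver code): with a full GI an
 * HT symbol lasts 4 us, so NUM_SYMBOLS_PER_USEC(1500) = 375 symbols fit
 * in 1.5 ms.  At MCS7/HT20 the table above gives 260 bits per symbol,
 * i.e. 375 * 260 / BITS_PER_BYTE = 12187 bytes of payload capacity
 * before PLCP/training overhead is accounted for.
 */
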
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

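/*
 * Sanity check of the table above (editorial note): at MCS0/HT20 with a
 * full GI, 4 ms of airtime holds NUM_SYMBOLS_PER_USEC(4000) = 1000
 * symbols * 26 bits = 3250 bytes; the 3212-byte entry leaves a little
 * headroom, presumably for PLCP and training overhead.  Entries are
 * capped at 65532 because the hardware is limited to 16-bit aggregate
 * lengths (see ath_lookup_rate() below).
 */
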
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		spin_unlock_bh(&txq->axq_lock);
		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

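/*
 * Illustrative example (editorial, not driver code): if tid->seq_start
 * is 100 and a subframe with seqno 103 is added, ATH_BA_INDEX() yields
 * 3 and bit (baw_head + 3) % ATH_TID_MAX_BUFS is set in tid->tx_buf.
 * When seqno 100 later completes, ath_tx_update_baw() clears its bit
 * and slides seq_start/baw_head forward past every already-completed
 * entry, keeping the window anchored at the oldest outstanding
 * subframe.
 */
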
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			spin_unlock(&txq->axq_lock);
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			spin_lock(&txq->axq_lock);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	if (fi->retries++ > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA issue
			 * happens and the chip then needs to be reset.
			 * But the AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine, so only enable the reset in STA mode
			 * for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if ((tid->state & AGGR_CLEANUP) || !retry) {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
				if (txok || !an->sleeping)
					ath_tx_set_retry(sc, txq, bf->bf_mpdu);

				txpending = 1;
			} else {
				txfail = 1;
				sendbar = 1;
				txfail_cnt++;
			}
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid, seqno);
						spin_unlock_bh(&txq->axq_lock);

						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 1);
						break;
					}

					fi->bf = tbf;
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		spin_lock_bh(&txq->axq_lock);
		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet altogether.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

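/*
 * Illustrative example (editorial, not driver code): if the slowest
 * entry of the rate series is MCS2/HT20 with a full GI, the lookup
 * above gives ath_max_4ms_framelen[MCS_HT20][2] = 9648 bytes, so the
 * aggregate is capped at 9648 bytes (or 3/8 of that under BT priority),
 * before the peer's ADDBA maxampdu limit is applied on top.
 */
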
/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add a delimiter when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

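/*
 * Worked example (editorial, not driver code): with an 8 us MPDU
 * density and a first rate of MCS7/HT20 with full GI,
 * NUM_SYMBOLS_PER_USEC(8) = 2 symbols and nsymbits = 260, so
 * minlen = 2 * 260 / BITS_PER_BYTE = 65 bytes.  A 49-byte subframe then
 * needs (65 - 49) / 4 = 4 extra delimiters, assuming the usual 4-byte
 * MPDU delimiter size behind ATH_AGGR_DELIM_SZ.
 */
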
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

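/*
 * Illustrative example (editorial, not driver code): a 1534-byte
 * subframe gives al_delta = ATH_AGGR_DELIM_SZ + 1534 = 1538 (assuming
 * the 4-byte delimiter size), so PADBYTES(1538) = 2 bytes of alignment
 * padding; with ndelim = 4 the next subframe is preceded by
 * bpad = 2 + (4 << 2) = 18 bytes, which is charged against aggr_limit
 * on the following iteration.
 */
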
/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width  - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us (half GI) symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

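/*
 * Worked example (editorial, not driver code): a 1500-byte MPDU at
 * MCS7/HT20, full GI, one stream: nbits = 1500 * 8 + 22 = 12022,
 * nsymbits = 260, so nsymbols = 47 (rounded up) and the payload lasts
 * SYMBOL_TIME(47) = 188 us; adding 8 + 8 + 4 + 8 + 4 + 4 us of
 * preamble/training fields gives a total duration of 224 us.
 */
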
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

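/*
 * Editorial note: the MCS path in ath_buf_set_rate() encodes the
 * hardware rate as rix | 0x80, which is exactly what the IS_HT_RATE()
 * macro at the top of this file tests for; e.g. MCS5 becomes 0x85,
 * while legacy hw_value codes keep that bit clear.
 */
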
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

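/*
 * Illustrative example (editorial, not driver code): with mac80211's
 * IEEE80211_MIN_AMPDU_BUF of 8 and a peer advertising
 * ht_cap.ampdu_factor = 3, the resumed TID gets
 * baw_size = 8 << 3 = 64 subframes, of which ath_tx_form_aggr() uses
 * at most half (h_baw) per aggregate.
 */
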
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		spin_unlock_bh(&txq->axq_lock);
		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock_bh(&txq->axq_lock);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = true;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}

e8324357 1639static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
44f1d26c 1640 struct sk_buff *skb, struct ath_tx_control *txctl)
f078f209 1641{
44f1d26c 1642 struct ath_frame_info *fi = get_frame_info(skb);
04caf863 1643 struct list_head bf_head;
44f1d26c 1644 struct ath_buf *bf;
f078f209 1645
e8324357
S
1646 /*
1647 * Do not queue to h/w when any of the following conditions is true:
1648 * - there are pending frames in software queue
1649 * - the TID is currently paused for ADDBA/BAR request
1650 * - seqno is not within block-ack window
1651 * - h/w queue depth exceeds low water mark
1652 */
56dc6336 1653 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
44f1d26c 1654 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
4b3ba66a 1655 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1656 /*
e8324357
S
1657 * Add this frame to software queue for scheduling later
1658 * for aggregation.
f078f209 1659 */
bda8adda 1660 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
44f1d26c 1661 __skb_queue_tail(&tid->buf_q, skb);
9af73cf7
FF
1662 if (!txctl->an || !txctl->an->sleeping)
1663 ath_tx_queue_tid(txctl->txq, tid);
e8324357
S
1664 return;
1665 }
1666
44f1d26c
FF
1667 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1668 if (!bf)
1669 return;
1670
399c6489 1671 bf->bf_state.bf_type = BUF_AMPDU;
04caf863
FF
1672 INIT_LIST_HEAD(&bf_head);
1673 list_add(&bf->list, &bf_head);
1674
e8324357 1675 /* Add sub-frame to BAW */
44f1d26c 1676 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
e8324357
S
1677
1678 /* Queue to h/w without aggregation */
bda8adda 1679 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
d43f3015 1680 bf->bf_lastbf = bf;
493cf04f 1681 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
fce041be 1682 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
e8324357
S
1683}
1684
82b873af 1685static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c 1686 struct ath_atx_tid *tid, struct sk_buff *skb)
e8324357 1687{
44f1d26c
FF
1688 struct ath_frame_info *fi = get_frame_info(skb);
1689 struct list_head bf_head;
e8324357
S
1690 struct ath_buf *bf;
1691
44f1d26c
FF
1692 bf = fi->bf;
1693 if (!bf)
1694 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1695
1696 if (!bf)
1697 return;
1698
1699 INIT_LIST_HEAD(&bf_head);
1700 list_add_tail(&bf->list, &bf_head);
399c6489 1701 bf->bf_state.bf_type = 0;
e8324357
S
1702
1703 /* update starting sequence number for subsequent ADDBA request */
82b873af
FF
1704 if (tid)
1705 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
e8324357 1706
d43f3015 1707 bf->bf_lastbf = bf;
493cf04f 1708 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
44f1d26c 1709 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
fec247c0 1710 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1711}
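
/*
 * Note: unlike the A-MPDU path, this path never buffers in software.
 * Advancing tid->seq_start here keeps the block-ack window origin in
 * sync, so a later ADDBA request starts its window after this
 * non-aggregated frame.
 */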
1712
2d42efc4
FF
1713static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1714 int framelen)
e8324357
S
1715{
1716 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1717 struct ieee80211_sta *sta = tx_info->control.sta;
1718 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1719 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4 1720 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1721 struct ath_node *an = NULL;
2d42efc4 1722 enum ath9k_key_type keytype;
e8324357 1723
2d42efc4 1724 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1725
93ae2dd2
FF
1726 if (sta)
1727 an = (struct ath_node *) sta->drv_priv;
1728
2d42efc4
FF
1729 memset(fi, 0, sizeof(*fi));
1730 if (hw_key)
1731 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1732 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1733 fi->keyix = an->ps_key;
2d42efc4
FF
1734 else
1735 fi->keyix = ATH9K_TXKEYIX_INVALID;
1736 fi->keytype = keytype;
1737 fi->framelen = framelen;
e8324357
S
1738}
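
/*
 * Key slot selection above, in order of preference: the hw_key index
 * programmed by mac80211, then the per-station ps_key slot for data
 * frames (if one was allocated), and finally ATH9K_TXKEYIX_INVALID when
 * no key cache entry applies.
 */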
1739
ea066d5a
MSS
1740u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1741{
1742 struct ath_hw *ah = sc->sc_ah;
1743 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1744 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1745 (curchan->channelFlags & CHANNEL_5GHZ) &&
1746 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1747 return 0x3;
1748 else
1749 return chainmask;
1750}
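
/*
 * Worked example for the reduction above: on ATH9K_HW_CAP_APM hardware
 * tuned to a 5 GHz channel, a frame at a rate code below 0x90 (which
 * needs at most two spatial streams) that would have used all three
 * chains (0x7) is sent on two chains (0x3) instead; three-stream rates
 * (0x90 and above) and other chainmasks pass through unchanged.
 */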
1751
44f1d26c
FF
1752/*
1753  * Assign a descriptor (and a sequence number if necessary),
1754  * and map the buffer for DMA. Frees the skb on error.
1755 */
fa05f87a 1756static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
04caf863 1757 struct ath_txq *txq,
fa05f87a 1758 struct ath_atx_tid *tid,
2d42efc4 1759 struct sk_buff *skb)
f078f209 1760{
82b873af 1761 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1762 struct ath_frame_info *fi = get_frame_info(skb);
fa05f87a 1763 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
82b873af 1764 struct ath_buf *bf;
fa05f87a 1765 u16 seqno;
82b873af
FF
1766
1767 bf = ath_tx_get_buffer(sc);
1768 if (!bf) {
226afe68 1769 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
44f1d26c 1770 goto error;
82b873af 1771 }
e022edbd 1772
528f0c6b 1773 ATH_TXBUF_RESET(bf);
f078f209 1774
fa05f87a
FF
1775 if (tid) {
1776 seqno = tid->seq_next;
1777 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1778 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1779 bf->bf_state.seqno = seqno;
1780 }
1781
f078f209 1782 bf->bf_mpdu = skb;
f8316df1 1783
c1739eb3
BG
1784 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1785 skb->len, DMA_TO_DEVICE);
1786 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1787 bf->bf_mpdu = NULL;
6cf9e995 1788 bf->bf_buf_addr = 0;
3800276a
JP
1789 ath_err(ath9k_hw_common(sc->sc_ah),
1790 "dma_mapping_error() on TX\n");
82b873af 1791 ath_tx_return_buffer(sc, bf);
44f1d26c 1792 goto error;
f8316df1
LR
1793 }
1794
56dc6336 1795 fi->bf = bf;
04caf863
FF
1796
1797 return bf;
44f1d26c
FF
1798
1799error:
1800 dev_kfree_skb_any(skb);
1801 return NULL;
04caf863
FF
1802}
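
/*
 * Error contract of ath_tx_setup_buffer(): on buffer exhaustion or a DMA
 * mapping failure the skb is freed (dev_kfree_skb_any) and NULL is
 * returned, so a caller that gets NULL back must not touch the skb again.
 */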
1803
1804/* FIXME: tx power */
44f1d26c 1805static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
04caf863
FF
1806 struct ath_tx_control *txctl)
1807{
04caf863
FF
1808 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1809 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
248a38d0 1810 struct ath_atx_tid *tid = NULL;
fa05f87a 1811 struct ath_buf *bf;
04caf863 1812 u8 tidno;
f078f209 1813
528f0c6b 1814 spin_lock_bh(&txctl->txq->axq_lock);
61e1b0b0
MSS
1815 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1816 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1817 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1818 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1819 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1820
066dae93 1821 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1822 }
1823
1824 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1825 /*
1826 * Try aggregation if it's a unicast data frame
1827 * and the destination is HT capable.
1828 */
44f1d26c 1829 ath_tx_send_ampdu(sc, tid, skb, txctl);
f078f209 1830 } else {
44f1d26c
FF
1831 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1832 if (!bf)
1833 goto out;
04caf863 1834
82b873af
FF
1835 bf->bf_state.bfs_paprd = txctl->paprd;
1836
9cf04dcc
MSS
1837 if (txctl->paprd)
1838 bf->bf_state.bfs_paprd_timestamp = jiffies;
1839
44f1d26c 1840 ath_tx_send_normal(sc, txctl->txq, tid, skb);
f078f209 1841 }
528f0c6b 1842
fa05f87a 1843out:
528f0c6b 1844 spin_unlock_bh(&txctl->txq->axq_lock);
f078f209
LR
1845}
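
/*
 * Everything above runs under txctl->txq->axq_lock: the TID lookup, the
 * aggregate-vs-normal decision and the descriptor handoff are thereby
 * serialized against the completion path, which takes the same lock.
 */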
1846
f8316df1 1847/* Upon failure caller should free skb */
c52f33d0 1848int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1849 struct ath_tx_control *txctl)
f078f209 1850{
28d16708
FF
1851 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1852 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1853 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1854 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1855 struct ath_softc *sc = hw->priv;
84642d6b 1856 struct ath_txq *txq = txctl->txq;
4d91f9f3 1857 int padpos, padsize;
04caf863 1858 int frmlen = skb->len + FCS_LEN;
28d16708 1859 int q;
f078f209 1860
a9927ba3
BG
1861 /* NOTE: sta can be NULL according to net/mac80211.h */
1862 if (sta)
1863 txctl->an = (struct ath_node *)sta->drv_priv;
1864
04caf863
FF
1865 if (info->control.hw_key)
1866 frmlen += info->control.hw_key->icv_len;
1867
f078f209 1868 /*
e8324357
S
1869 * As a temporary workaround, assign seq# here; this will likely need
1870 * to be cleaned up to work better with Beacon transmission and virtual
1871 * BSSes.
f078f209 1872 */
e8324357 1873 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1874 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1875 sc->tx.seq_no += 0x10;
1876 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1877 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1878 }
f078f209 1879
42cecc34
JL
1880 /* Add the padding after the header if this is not already done */
1881 padpos = ath9k_cmn_padpos(hdr->frame_control);
1882 padsize = padpos & 3;
1883 if (padsize && skb->len > padpos) {
1884 if (skb_headroom(skb) < padsize)
1885 return -ENOMEM;
28d16708 1886
42cecc34
JL
1887 skb_push(skb, padsize);
1888 memmove(skb->data, skb->data + padsize, padpos);
6e82bc4a 1889 hdr = (struct ieee80211_hdr *) skb->data;
f078f209 1890 }
f078f209 1891
f59a59fe
FF
1892 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1893 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1894 !ieee80211_is_data(hdr->frame_control))
1895 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1896
2d42efc4
FF
1897 setup_frame_info(hw, skb, frmlen);
1898
1899 /*
1900 * At this point, the vif, hw_key and sta pointers in the tx control
1901 	 * info are no longer valid (overwritten by the ath_frame_info data).
1902 */
1903
28d16708
FF
1904 q = skb_get_queue_mapping(skb);
1905 spin_lock_bh(&txq->axq_lock);
1906 if (txq == sc->tx.txq_map[q] &&
1907 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1908 ieee80211_stop_queue(sc->hw, q);
28d16708 1909 txq->stopped = 1;
f078f209 1910 }
28d16708 1911 spin_unlock_bh(&txq->axq_lock);
f078f209 1912
44f1d26c
FF
1913 ath_tx_start_dma(sc, skb, txctl);
1914 return 0;
f078f209
LR
1915}
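
/*
 * Header padding example (hypothetical frame, not from the original
 * source): a QoS data header is 26 bytes, so padpos = 26 and
 * padsize = 26 & 3 = 2; skb_push() grows the head by two bytes and
 * memmove() slides the header forward, leaving the pad between header
 * and payload so the payload starts 4-byte aligned. ath_tx_complete()
 * strips the same padding before the frame goes back to mac80211.
 */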
1916
e8324357
S
1917/*****************/
1918/* TX Completion */
1919/*****************/
528f0c6b 1920
e8324357 1921static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1922 int tx_flags, struct ath_txq *txq)
528f0c6b 1923{
e8324357
S
1924 struct ieee80211_hw *hw = sc->hw;
1925 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1926 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1927 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1928 int q, padpos, padsize;
528f0c6b 1929
226afe68 1930 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1931
6b2c4032 1932 if (tx_flags & ATH_TX_BAR)
e8324357 1933 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1934
55797b1a 1935 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
1936 /* Frame was ACKed */
1937 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 1938
42cecc34
JL
1939 padpos = ath9k_cmn_padpos(hdr->frame_control);
1940 padsize = padpos & 3;
1941 	if (padsize && skb->len > padpos + padsize) {
1942 /*
1943 * Remove MAC header padding before giving the frame back to
1944 * mac80211.
1945 */
1946 memmove(skb->data + padsize, skb->data, padpos);
1947 skb_pull(skb, padsize);
e8324357 1948 }
528f0c6b 1949
1b04b930
S
1950 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1951 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
226afe68
JP
1952 ath_dbg(common, ATH_DBG_PS,
1953 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
1954 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1955 PS_WAIT_FOR_CAB |
1956 PS_WAIT_FOR_PSPOLL_DATA |
1957 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1958 }
1959
7545daf4
FF
1960 q = skb_get_queue_mapping(skb);
1961 if (txq == sc->tx.txq_map[q]) {
1962 spin_lock_bh(&txq->axq_lock);
1963 if (WARN_ON(--txq->pending_frames < 0))
1964 txq->pending_frames = 0;
92460412 1965
7545daf4
FF
1966 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1967 ieee80211_wake_queue(sc->hw, q);
1968 txq->stopped = 0;
066dae93 1969 }
7545daf4 1970 spin_unlock_bh(&txq->axq_lock);
97923b14 1971 }
7545daf4
FF
1972
1973 ieee80211_tx_status(hw, skb);
e8324357 1974}
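
/*
 * Flow-control counterpart to ath_tx_start(): pending_frames was
 * incremented when the frame entered the driver; once it drops back
 * below ATH_MAX_QDEPTH, the mac80211 queue that was stopped at submit
 * time is woken again.
 */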
f078f209 1975
e8324357 1976static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1977 struct ath_txq *txq, struct list_head *bf_q,
1978 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1979{
e8324357 1980 struct sk_buff *skb = bf->bf_mpdu;
3afd21e7 1981 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
e8324357 1982 unsigned long flags;
6b2c4032 1983 int tx_flags = 0;
f078f209 1984
e8324357 1985 if (sendbar)
6b2c4032 1986 tx_flags = ATH_TX_BAR;
f078f209 1987
55797b1a 1988 if (!txok)
6b2c4032 1989 tx_flags |= ATH_TX_ERROR;
f078f209 1990
3afd21e7
FF
1991 if (ts->ts_status & ATH9K_TXERR_FILT)
1992 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1993
c1739eb3 1994 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1995 bf->bf_buf_addr = 0;
9f42c2b6
FF
1996
1997 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
1998 if (time_after(jiffies,
1999 bf->bf_state.bfs_paprd_timestamp +
2000 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2001 dev_kfree_skb_any(skb);
78a18172 2002 else
ca369eb4 2003 complete(&sc->paprd_complete);
9f42c2b6 2004 } else {
55797b1a 2005 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2006 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2007 }
6cf9e995
BG
2008 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2009 * accidentally reference it later.
2010 */
2011 bf->bf_mpdu = NULL;
e8324357
S
2012
2013 /*
2014 	 * Return this mpdu's list of ath_bufs to the free queue.
2015 */
2016 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2017 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2018 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2019}
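
/*
 * PAPRD (power-amplifier pre-distortion) training frames take a special
 * path above: they are never reported to mac80211; a timely completion
 * signals the waiting calibration via sc->paprd_complete, while one that
 * arrives after ATH_PAPRD_TIMEOUT ms is simply freed.
 */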
2020
0cdd5c60
FF
2021static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2022 struct ath_tx_status *ts, int nframes, int nbad,
3afd21e7 2023 int txok)
f078f209 2024{
a22be22a 2025 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2026 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2027 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2028 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2029 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2030 u8 i, tx_rateindex;
f078f209 2031
95e4acb7 2032 if (txok)
db1a052b 2033 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2034
db1a052b 2035 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2036 WARN_ON(tx_rateindex >= hw->max_rates);
2037
3afd21e7 2038 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
d969847c 2039 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2040
b572d033 2041 BUG_ON(nbad > nframes);
ebd02287 2042 }
185d1589
RM
2043 tx_info->status.ampdu_len = nframes;
2044 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287 2045
db1a052b 2046 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
3afd21e7 2047 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
f0c255a0
FF
2048 /*
2049 		 * If an underrun error is seen, treat it as an excessive
2050 		 * retry only if the max frame trigger level has been reached
2051 * (2 KB for single stream, and 4 KB for dual stream).
2052 * Adjust the long retry as if the frame was tried
2053 * hw->max_rate_tries times to affect how rate control updates
2054 * PER for the failed rate.
2055 * In case of congestion on the bus penalizing this type of
2056 * underruns should help hardware actually transmit new frames
2057 * successfully by eventually preferring slower rates.
2058 * This itself should also alleviate congestion on the bus.
2059 */
3afd21e7
FF
2060 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2061 ATH9K_TX_DELIM_UNDERRUN)) &&
2062 ieee80211_is_data(hdr->frame_control) &&
83860c59 2063 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2064 tx_info->status.rates[tx_rateindex].count =
2065 hw->max_rate_tries;
f078f209 2066 }
8a92e2ee 2067
545750d3 2068 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2069 tx_info->status.rates[i].count = 0;
545750d3
FF
2070 tx_info->status.rates[i].idx = -1;
2071 }
8a92e2ee 2072
78c4653a 2073 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2074}
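
/*
 * Reporting sketch for ath_tx_rc_status(): rate table entries past the
 * final attempted index are cleared (count = 0, idx = -1) so rate
 * control only sees rates that were actually tried, and the attempt
 * count for the final rate comes from the hardware long-retry counter
 * (ts_longretry + 1).
 */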
2075
fce041be
FF
2076static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2077 struct ath_tx_status *ts, struct ath_buf *bf,
2078 struct list_head *bf_head)
5479de6e
RM
2079 __releases(txq->axq_lock)
2080 __acquires(txq->axq_lock)
fce041be
FF
2081{
2082 int txok;
2083
2084 txq->axq_depth--;
2085 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2086 txq->axq_tx_inprogress = false;
2087 if (bf_is_ampdu_not_probing(bf))
2088 txq->axq_ampdu_depth--;
2089
2090 spin_unlock_bh(&txq->axq_lock);
2091
2092 if (!bf_isampdu(bf)) {
3afd21e7 2093 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
fce041be
FF
2094 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
2095 } else
2096 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2097
2098 spin_lock_bh(&txq->axq_lock);
2099
2100 if (sc->sc_flags & SC_OP_TXAGGR)
2101 ath_txq_schedule(sc, txq);
2102}
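
/*
 * The __releases/__acquires annotations above are sparse hints: the
 * queue lock is deliberately dropped around the completion calls, since
 * ath_tx_complete_buf()/ath_tx_complete_aggr() end up in
 * ieee80211_tx_status(), which can re-enter the driver and try to take
 * the same lock; it is re-taken before the queue is rescheduled.
 */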
2103
e8324357 2104static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2105{
cbe61d8a 2106 struct ath_hw *ah = sc->sc_ah;
c46917bb 2107 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2108 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2109 struct list_head bf_head;
e8324357 2110 struct ath_desc *ds;
29bffa96 2111 struct ath_tx_status ts;
e8324357 2112 int status;
f078f209 2113
226afe68
JP
2114 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2115 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2116 txq->axq_link);
f078f209 2117
fce041be 2118 spin_lock_bh(&txq->axq_lock);
f078f209 2119 for (;;) {
236de514
FF
2120 if (work_pending(&sc->hw_reset_work))
2121 break;
2122
f078f209
LR
2123 if (list_empty(&txq->axq_q)) {
2124 txq->axq_link = NULL;
86271e46 2125 if (sc->sc_flags & SC_OP_TXAGGR)
082f6536 2126 ath_txq_schedule(sc, txq);
f078f209
LR
2127 break;
2128 }
f078f209
LR
2129 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2130
e8324357
S
2131 /*
2132 		 * There is a race condition where a BH gets scheduled
2133 		 * after sw writes TxE and before hw re-loads the last
2134 * descriptor to get the newly chained one.
2135 * Software must keep the last DONE descriptor as a
2136 * holding descriptor - software does so by marking
2137 * it with the STALE flag.
2138 */
2139 bf_held = NULL;
a119cc49 2140 if (bf->bf_stale) {
e8324357 2141 bf_held = bf;
fce041be 2142 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2143 break;
fce041be
FF
2144
2145 bf = list_entry(bf_held->list.next, struct ath_buf,
2146 list);
f078f209
LR
2147 }
2148
2149 lastbf = bf->bf_lastbf;
e8324357 2150 ds = lastbf->bf_desc;
f078f209 2151
29bffa96
FF
2152 memset(&ts, 0, sizeof(ts));
2153 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2154 if (status == -EINPROGRESS)
e8324357 2155 break;
fce041be 2156
2dac4fb9 2157 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2158
e8324357
S
2159 /*
2160 * Remove ath_buf's of the same transmit unit from txq,
2161 		 * but leave the last descriptor behind as the holding
2162 * descriptor for hw.
2163 */
a119cc49 2164 lastbf->bf_stale = true;
e8324357 2165 INIT_LIST_HEAD(&bf_head);
e8324357
S
2166 if (!list_is_singular(&lastbf->list))
2167 list_cut_position(&bf_head,
2168 &txq->axq_q, lastbf->list.prev);
f078f209 2169
fce041be 2170 if (bf_held) {
0a8cea84 2171 list_del(&bf_held->list);
0a8cea84 2172 ath_tx_return_buffer(sc, bf_held);
e8324357 2173 }
f078f209 2174
fce041be 2175 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2176 }
fce041be 2177 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2178}
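
/*
 * Reap loop summary: descriptors are processed in order until
 * ath9k_hw_txprocdesc() returns -EINPROGRESS. The last descriptor of a
 * completed chain stays on axq_q flagged bf_stale as the holding
 * descriptor (see the race comment above) and is only unlinked and
 * returned to the free list on a later pass.
 */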
2179
305fe47f 2180static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2181{
2182 struct ath_softc *sc = container_of(work, struct ath_softc,
2183 tx_complete_work.work);
2184 struct ath_txq *txq;
2185 int i;
2186 bool needreset = false;
60f2d1d5
BG
2187#ifdef CONFIG_ATH9K_DEBUGFS
2188 sc->tx_complete_poll_work_seen++;
2189#endif
164ace38
SB
2190
2191 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2192 if (ATH_TXQ_SETUP(sc, i)) {
2193 txq = &sc->tx.txq[i];
2194 spin_lock_bh(&txq->axq_lock);
2195 if (txq->axq_depth) {
2196 if (txq->axq_tx_inprogress) {
2197 needreset = true;
2198 spin_unlock_bh(&txq->axq_lock);
2199 break;
2200 } else {
2201 txq->axq_tx_inprogress = true;
2202 }
2203 }
2204 spin_unlock_bh(&txq->axq_lock);
2205 }
2206
2207 if (needreset) {
226afe68
JP
2208 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2209 "tx hung, resetting the chip\n");
030d6294 2210 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
236de514 2211 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2212 }
2213
42935eca 2214 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2215 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2216}
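
/*
 * Hang-detection protocol: ath_tx_process_buffer() clears
 * axq_tx_inprogress on every completion, and this poll (run every
 * ATH_TX_COMPLETE_POLL_INT ms) sets it on any queue that still has
 * frames outstanding. Finding the flag already set therefore means no
 * frame completed during a whole poll interval, and a chip reset is
 * scheduled.
 */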
2217
2218
f078f209 2219
e8324357 2220void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2221{
e8324357
S
2222 int i;
2223 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2224
e8324357 2225 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2226
e8324357
S
2227 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2228 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2229 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2230 }
2231}
2232
e5003249
VT
2233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
fce041be 2235 struct ath_tx_status ts;
e5003249
VT
2236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
e5003249
VT
2242
2243 for (;;) {
236de514
FF
2244 if (work_pending(&sc->hw_reset_work))
2245 break;
2246
fce041be 2247 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
e5003249
VT
2248 if (status == -EINPROGRESS)
2249 break;
2250 if (status == -EIO) {
226afe68
JP
2251 ath_dbg(common, ATH_DBG_XMIT,
2252 "Error processing tx status\n");
e5003249
VT
2253 break;
2254 }
2255
2256 /* Skip beacon completions */
fce041be 2257 if (ts.qid == sc->beacon.beaconq)
e5003249
VT
2258 continue;
2259
fce041be 2260 txq = &sc->tx.txq[ts.qid];
e5003249
VT
2261
2262 spin_lock_bh(&txq->axq_lock);
fce041be 2263
e5003249
VT
2264 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2265 spin_unlock_bh(&txq->axq_lock);
2266 return;
2267 }
2268
2269 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2270 struct ath_buf, list);
2271 lastbf = bf->bf_lastbf;
2272
2273 INIT_LIST_HEAD(&bf_head);
2274 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2275 &lastbf->list);
e5003249 2276
fce041be
FF
2277 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2278 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
e5003249 2279
fce041be
FF
2280 if (!list_empty(&txq->axq_q)) {
2281 struct list_head bf_q;
60f2d1d5 2282
fce041be
FF
2283 INIT_LIST_HEAD(&bf_q);
2284 txq->axq_link = NULL;
2285 list_splice_tail_init(&txq->axq_q, &bf_q);
2286 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2287 }
2288 }
86271e46 2289
fce041be 2290 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
e5003249
VT
2291 spin_unlock_bh(&txq->axq_lock);
2292 }
2293}
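
/*
 * EDMA completion is status-ring driven: ts.qid names the hardware
 * queue, beacon-queue completions are skipped, and whenever a txq_fifo
 * slot drains, any chains parked on axq_q (queued while all
 * ATH_TXFIFO_DEPTH slots were busy) are re-pushed through
 * ath_tx_txqaddbuf() with internal = true so the depth counters are not
 * incremented twice.
 */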
2294
e8324357
S
2295/*****************/
2296/* Init, Cleanup */
2297/*****************/
f078f209 2298
5088c2f1
VT
2299static int ath_txstatus_setup(struct ath_softc *sc, int size)
2300{
2301 struct ath_descdma *dd = &sc->txsdma;
2302 u8 txs_len = sc->sc_ah->caps.txs_len;
2303
2304 dd->dd_desc_len = size * txs_len;
2305 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2306 &dd->dd_desc_paddr, GFP_KERNEL);
2307 if (!dd->dd_desc)
2308 return -ENOMEM;
2309
2310 return 0;
2311}
2312
2313static int ath_tx_edma_init(struct ath_softc *sc)
2314{
2315 int err;
2316
2317 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2318 if (!err)
2319 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2320 sc->txsdma.dd_desc_paddr,
2321 ATH_TXSTATUS_RING_SIZE);
2322
2323 return err;
2324}
2325
2326static void ath_tx_edma_cleanup(struct ath_softc *sc)
2327{
2328 struct ath_descdma *dd = &sc->txsdma;
2329
2330 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2331 dd->dd_desc_paddr);
2332}
2333
e8324357 2334int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2335{
c46917bb 2336 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2337 int error = 0;
f078f209 2338
797fe5cb 2339 spin_lock_init(&sc->tx.txbuflock);
f078f209 2340
797fe5cb 2341 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2342 "tx", nbufs, 1, 1);
797fe5cb 2343 if (error != 0) {
3800276a
JP
2344 ath_err(common,
2345 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2346 goto err;
2347 }
f078f209 2348
797fe5cb 2349 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2350 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2351 if (error != 0) {
3800276a
JP
2352 ath_err(common,
2353 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2354 goto err;
2355 }
f078f209 2356
164ace38
SB
2357 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2358
5088c2f1
VT
2359 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2360 error = ath_tx_edma_init(sc);
2361 if (error)
2362 goto err;
2363 }
2364
797fe5cb 2365err:
e8324357
S
2366 if (error != 0)
2367 ath_tx_cleanup(sc);
f078f209 2368
e8324357 2369 return error;
f078f209
LR
2370}
2371
797fe5cb 2372void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2373{
2374 if (sc->beacon.bdma.dd_desc_len != 0)
2375 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2376
2377 if (sc->tx.txdma.dd_desc_len != 0)
2378 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2379
2380 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2381 ath_tx_edma_cleanup(sc);
e8324357 2382}
f078f209
LR
2383
2384void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2385{
c5170163
S
2386 struct ath_atx_tid *tid;
2387 struct ath_atx_ac *ac;
2388 int tidno, acno;
f078f209 2389
8ee5afbc 2390 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2391 tidno < WME_NUM_TID;
2392 tidno++, tid++) {
2393 tid->an = an;
2394 tid->tidno = tidno;
2395 tid->seq_start = tid->seq_next = 0;
2396 tid->baw_size = WME_MAX_BA;
2397 tid->baw_head = tid->baw_tail = 0;
2398 tid->sched = false;
e8324357 2399 tid->paused = false;
a37c2c79 2400 tid->state &= ~AGGR_CLEANUP;
56dc6336 2401 __skb_queue_head_init(&tid->buf_q);
c5170163 2402 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2403 tid->ac = &an->ac[acno];
a37c2c79
S
2404 tid->state &= ~AGGR_ADDBA_COMPLETE;
2405 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2406 }
f078f209 2407
8ee5afbc 2408 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2409 acno < WME_NUM_AC; acno++, ac++) {
2410 ac->sched = false;
066dae93 2411 ac->txq = sc->tx.txq_map[acno];
c5170163 2412 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2413 }
2414}
2415
b5aa9bf9 2416void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2417{
2b40994c
FF
2418 struct ath_atx_ac *ac;
2419 struct ath_atx_tid *tid;
f078f209 2420 struct ath_txq *txq;
066dae93 2421 int tidno;
e8324357 2422
2b40994c
FF
2423 for (tidno = 0, tid = &an->tid[tidno];
2424 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2425
2b40994c 2426 ac = tid->ac;
066dae93 2427 txq = ac->txq;
f078f209 2428
2b40994c
FF
2429 spin_lock_bh(&txq->axq_lock);
2430
2431 if (tid->sched) {
2432 list_del(&tid->list);
2433 tid->sched = false;
2434 }
2435
2436 if (ac->sched) {
2437 list_del(&ac->list);
2438 tid->ac->sched = false;
f078f209 2439 }
2b40994c
FF
2440
2441 ath_tid_drain(sc, txq, tid);
2442 tid->state &= ~AGGR_ADDBA_COMPLETE;
2443 tid->state &= ~AGGR_CLEANUP;
2444
2445 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2446 }
2447}