/* drivers/net/wireless/ath/ath9k/xmit.c */
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)           /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

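/*
 * SYMBOL_TIME() and SYMBOL_TIME_HALFGI() convert an HT symbol count to
 * microseconds: a long-GI symbol lasts 4 us, a short-GI symbol 3.6 us.
 * The half-GI macro computes n * 18 / 5 (= n * 3.6) in integer math,
 * rounding up via the "+ 4" term.  For example, 10 symbols ->
 * (10 * 18 + 4) / 5 = 36 us, and 3 symbols -> 58 / 5 = 11 us.
 */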
static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};
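/*
 * Each entry above is the number of data bits carried by one OFDM symbol
 * for a single spatial stream; higher MCS indices reuse rows 0-7 via
 * rix % 8, scaled by the stream count HT_RC_2_STREAMS(rix), as done in
 * ath_pkt_duration() and ath_compute_num_delims() below.
 */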

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864, 19300, 25736, 28952, 32172,
		6424,  12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628,  19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296, 21444, 28596, 32172, 35744,
		7140,  14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	}
};

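/*
 * ath_max_4ms_framelen[mode][MCS] gives the largest frame size (bytes)
 * whose transmit duration still fits within 4 ms at that rate, capped at
 * 65532.  For example, HT20 MCS0 runs at 6.5 Mbit/s, so roughly
 * 6.5e6 * 0.004 / 8 = 3250 bytes fit in 4 ms; the table entry (3212) is
 * slightly lower, presumably to absorb PHY preamble/PLCP overhead.
 */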
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (skb_queue_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

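/*
 * Ask mac80211 to transmit a BlockAckReq that moves the receiver's
 * block-ack window forward to 'seqno' (shifted into the sequence-control
 * field); used when we give up on retransmitting the frames below it.
 */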
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar)
		ath_send_bar(tid, tid->seq_start);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}

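/*
 * Track an outgoing subframe in the block-ack window: the window is kept
 * as a bitmap ring of ATH_TID_MAX_BUFS slots starting at baw_head, where
 * a frame's slot offset is the distance of its sequence number from
 * tid->seq_start.  E.g. with seq_start = 100 and seqno = 103,
 * ATH_BA_INDEX() yields 3 and bit (baw_head + 3) % ATH_TID_MAX_BUFS is
 * set; ath_tx_update_baw() above clears bits and slides seq_start
 * forward past completed frames.
 */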
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

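/*
 * Walk a (possibly aggregated) chain of tx buffers and report how many
 * subframes it contains and how many of them failed: for an aggregate,
 * a subframe counts as bad when its bit is missing from the block-ack
 * bitmap that arrived with the tx status.
 */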
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_tx_status *ts, int txok,
				int *nframes, int *nbad)
{
	struct ath_frame_info *fi;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int isaggr = 0;

	*nbad = 0;
	*nframes = 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		fi = get_frame_info(bf->bf_mpdu);
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

		(*nframes)++;
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			(*nbad)++;

		bf = bf->bf_next;
	}
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	memcpy(rates, tx_info->control.rates, sizeof(rates));

	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset,
			 * but AP code may have synchronization issues
			 * when performing an internal reset in this
			 * routine. Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * is not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}

static bool ath_lookup_legacy(struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || rates[i].idx < 0)
			break;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
			return true;
	}

	return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_mci_profile *mci = &sc->btcoex.mci;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
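	/*
	 * Each candidate rate is looked up in ath_max_4ms_framelen[] above;
	 * the aggregate size is then bounded by the worst (smallest) of
	 * those per-rate 4 ms limits.
	 */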
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
		aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
	else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiters when using RTS/CTS with aggregation
	 * on non-enterprise AR9003 cards
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert the desired mpdu density from microseconds to bytes based
	 * on the highest rate in the rate series (i.e. the first rate) to
	 * determine the required minimum length for a subframe. Take into
	 * account whether the high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
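	/*
	 * Worked example: with a density of 8 us, HT20, long GI and
	 * single-stream MCS 7 (260 bits/symbol), nsymbols = 8 / 4 = 2 and
	 * minlen = 2 * 260 / 8 = 65 bytes; a shorter subframe gets
	 * (65 - frmlen) / ATH_AGGR_DELIM_SZ extra delimiters.
	 */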

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use 4 us or 3.6 us for the symbol time
 */
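/*
 * Worked example: a 1500-byte MPDU at single-stream HT20 MCS 0
 * (26 bits/symbol) with long GI needs
 * nsymbols = DIV_ROUND_UP(1500 * 8 + 22, 26) = 463 symbols,
 * i.e. 1852 us of data, plus 36 us of preamble fields
 * (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)),
 * for a total of 1888 us.
 */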
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up the duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_info *info, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i;
	u8 rix = 0;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	info->rtscts_rate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		info->rtscts_rate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		info->rates[i].Tries = rates[i].count;

		if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			info->flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			info->rates[i].Rate = rix | 0x80;
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);
			info->rates[i].PktDuration = ath_pkt_duration(sc, rix,
					len, is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		info->rates[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				info->rates[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		if (bf->bf_state.bfs_paprd)
			info->rates[i].ChSel = ah->txchainmask;
		else
			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
					ah->txchainmask, info->rates[i].Rate);

		info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		info->flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (info->flags & ATH9K_TXDESC_RTSENA)
		info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start = txtid->seq_next;
	txtid->bar_index = -1;

	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
	txtid->baw_head = txtid->baw_tail = 0;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	spin_unlock_bh(&txq->axq_lock);
}

void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		buffered = !skb_queue_empty(&tid->buf_q);

		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		spin_unlock_bh(&txq->axq_lock);

		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);
		ac->clear_ps_filter = true;

		if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
			ath_tx_queue_tid(txq, tid);
			ath_txq_schedule(sc, txq);
		}

		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (cur_conf->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	spin_lock_bh(&txq->axq_lock);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	spin_unlock_bh(&txq->axq_lock);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by the caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, ATH_DBG_QUEUE,
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, ATH_DBG_XMIT,
				"link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}

e8324357 1670static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
44f1d26c 1671 struct sk_buff *skb, struct ath_tx_control *txctl)
f078f209 1672{
44f1d26c 1673 struct ath_frame_info *fi = get_frame_info(skb);
04caf863 1674 struct list_head bf_head;
44f1d26c 1675 struct ath_buf *bf;
f078f209 1676
e8324357
S
1677 /*
1678 * Do not queue to h/w when any of the following conditions is true:
1679 * - there are pending frames in software queue
1680 * - the TID is currently paused for ADDBA/BAR request
1681 * - seqno is not within block-ack window
1682 * - h/w queue depth exceeds low water mark
1683 */
56dc6336 1684 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
44f1d26c 1685 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
4b3ba66a 1686 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1687 /*
e8324357
S
1688 * Add this frame to software queue for scheduling later
1689 * for aggregation.
f078f209 1690 */
bda8adda 1691 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
44f1d26c 1692 __skb_queue_tail(&tid->buf_q, skb);
9af73cf7
FF
1693 if (!txctl->an || !txctl->an->sleeping)
1694 ath_tx_queue_tid(txctl->txq, tid);
e8324357
S
1695 return;
1696 }
1697
44f1d26c
FF
1698 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1699 if (!bf)
1700 return;
1701
399c6489 1702 bf->bf_state.bf_type = BUF_AMPDU;
04caf863
FF
1703 INIT_LIST_HEAD(&bf_head);
1704 list_add(&bf->list, &bf_head);
1705
e8324357 1706 /* Add sub-frame to BAW */
44f1d26c 1707 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
e8324357
S
1708
1709 /* Queue to h/w without aggregation */
bda8adda 1710 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
d43f3015 1711 bf->bf_lastbf = bf;
493cf04f 1712 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
fce041be 1713 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
e8324357
S
1714}
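
/*
 * Editor's note on the BAW check above: BAW_WITHIN(start, sz, seq)
 * (defined in ath9k.h) tests whether seq falls inside the block-ack
 * window beginning at seq_start, modulo the 12-bit sequence space.
 * For example, with seq_start == 100 and baw_size == 64, a frame with
 * seq_next == 170 is parked in the software queue rather than sent
 * immediately, since 170 lies outside [100, 164).
 */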

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid, struct sk_buff *skb)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	bf = fi->bf;
	if (!bf)
		bf = ath_tx_setup_buffer(sc, txq, tid, skb);

	if (!bf)
		return;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type = 0;

	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
			     int framelen)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_node *an = NULL;
	enum ath9k_key_type keytype;

	keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

	if (sta)
		an = (struct ath_node *) sta->drv_priv;

	memset(fi, 0, sizeof(*fi));
	if (hw_key)
		fi->keyix = hw_key->hw_key_idx;
	else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
		fi->keyix = an->ps_key;
	else
		fi->keyix = ATH9K_TXKEYIX_INVALID;
	fi->keytype = keytype;
	fi->framelen = framelen;
}
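
/*
 * Editor's note: struct ath_frame_info is stored inside the skb's tx
 * info (get_frame_info() returns a pointer into IEEE80211_SKB_CB(skb)),
 * which is why ath_tx_start() below warns that the vif/sta/hw_key
 * pointers are dead once setup_frame_info() has run.
 */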

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
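
/*
 * Worked example (editor's note, assuming the usual Atheros rate-code
 * layout where 0x80 and up are HT MCS codes): with APM enabled on a
 * 5 GHz channel, a frame at a legacy or low-MCS rate (rate < 0x90) that
 * would otherwise use all three chains (0x7) is sent on chains 0 and 1
 * only (0x3); every other combination keeps the configured chainmask.
 */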

/*
 * Assign a descriptor (and sequence number if necessary) and map the
 * buffer for DMA. Frees the skb on error.
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}
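
/*
 * Editor's note: the tid == NULL case (non-QoS frames) leaves
 * bf_state.seqno at its reset value; sequence numbers for those frames
 * come from the IEEE80211_TX_CTL_ASSIGN_SEQ workaround in
 * ath_tx_start() below instead.
 */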

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			return;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct ieee80211_vif *vif = info->control.vif;
	struct ath_softc *sc = hw->priv;
	struct ath_txq *txq = txctl->txq;
	int padpos, padsize;
	int frmlen = skb->len + FCS_LEN;
	int q;

	/* NOTE: sta can be NULL according to net/mac80211.h */
	if (sta)
		txctl->an = (struct ath_node *)sta->drv_priv;

	if (info->control.hw_key)
		frmlen += info->control.hw_key->icv_len;

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		hdr = (struct ieee80211_hdr *)skb->data;
	}

	if ((vif && vif->type != NL80211_IFTYPE_AP &&
	     vif->type != NL80211_IFTYPE_AP_VLAN) ||
	    !ieee80211_is_data(hdr->frame_control))
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ieee80211_stop_queue(sc->hw, q);
		txq->stopped = 1;
	}

	ath_tx_start_dma(sc, skb, txctl);

	spin_unlock_bh(&txq->axq_lock);

	return 0;
}
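
/*
 * Padding example (editor's note): ath9k_cmn_padpos() returns the
 * 802.11 header length, so for a 26-byte QoS data header padpos & 3 == 2
 * and the header is pushed back two bytes, leaving the payload 4-byte
 * aligned. The inverse memmove/skb_pull in ath_tx_complete() below
 * undoes this before the frame is handed back to mac80211.
 */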

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    int tx_flags, struct ath_txq *txq)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (!(tx_flags & ATH_TX_ERROR))
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	q = skb_get_queue_mapping(skb);
	if (txq == sc->tx.txq_map[q]) {
		if (WARN_ON(--txq->pending_frames < 0))
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			ieee80211_wake_queue(sc->hw, q);
			txq->stopped = 0;
		}
	}

	ieee80211_tx_status(hw, skb);
}
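
/*
 * Editor's note: pending_frames pairs with the ieee80211_stop_queue()
 * call in ath_tx_start(); once completions drain the count back below
 * ATH_MAX_QDEPTH, the matching mac80211 queue is woken here.
 */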

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	int tx_flags = 0;

	if (!txok)
		tx_flags |= ATH_TX_ERROR;

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
		ath_tx_complete(sc, skb, tx_flags, txq);
	}
	/*
	 * At this point, skb (bf->bf_mpdu) is consumed... make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_bufs of this mpdu to the free queue.
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen, treat it as an excessive
		 * retry only if the max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus, penalizing this type of
		 * underrun should help the hardware actually transmit new
		 * frames successfully by eventually preferring slower rates,
		 * which should in turn also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
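
/*
 * Sketch of the resulting rate table as mac80211 sees it (editor's note,
 * assuming hw->max_rates == 4 and a frame that succeeded at
 * ts_rateindex == 1 after two long retries):
 *
 *	rates[0]: count left as filled in when the frame was set up
 *	rates[1]: count = ts_longretry + 1 = 3
 *	rates[2]: idx = -1, count = 0   (terminator)
 *	rates[3]: idx = -1, count = 0
 */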

static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int status;

	ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	spin_lock_bh(&txq->axq_lock);
	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition where a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to fetch the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q))
				break;

			bf = list_entry(bf_held->list.next, struct ath_buf,
					list);
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		TX_STAT_INC(txq->axq_qnum, txprocdesc);

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		if (bf_held) {
			list_del(&bf_held->list);
			ath_tx_return_buffer(sc, bf_held);
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
	}
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			"tx hung, resetting the chip\n");
		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
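
/*
 * Editor's note: this is a two-pass hang check. Each poll marks any busy
 * queue with axq_tx_inprogress; ath_tx_process_buffer() clears the flag
 * whenever a completion arrives. If a queue is still marked and still
 * non-empty one poll interval (ATH_TX_COMPLETE_POLL_INT) later, the TX
 * path is assumed hung and a chip reset is scheduled.
 */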

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		if (work_pending(&sc->hw_reset_work))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, ATH_DBG_XMIT,
				"Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (ts.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[ts.qid];

		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		spin_unlock_bh(&txq->axq_lock);
	}
}
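
/*
 * Editor's note: unlike ath_tx_processq(), which walks each queue's
 * descriptor list, the EDMA path pops entries from a single status ring;
 * ts.qid identifies the completing queue. Once a FIFO slot drains, any
 * frames parked on axq_q are moved into the FIFO via ath_tx_txqaddbuf()
 * with internal == true so the queue depth counters are not bumped twice.
 */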

/*****************/
/* Init, Cleanup */
/*****************/

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}
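
/*
 * Sizing sketch (editor's note, with purely hypothetical numbers): the
 * status ring holds ATH_TXSTATUS_RING_SIZE entries of caps.txs_len bytes
 * each, allocated as one coherent DMA block shared by all TX queues;
 * e.g. a 64-entry ring with 36-byte entries would be a 2.25 KB block.
 */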

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_err(common,
			"Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		__skb_queue_head_init(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
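
/*
 * Editor's note: every TID is bound to one of the WME_NUM_AC access
 * categories through TID_TO_WME_AC(), and each AC is pinned to its
 * hardware queue via sc->tx.txq_map - which is what the WARN_ON() in
 * ath_tx_start_dma() checks against.
 */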

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		ac = tid->ac;
		txq = ac->txq;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}