/* drivers/net/wireless/ath/ath9k/xmit.c */
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

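/*
 * Data bits carried per OFDM symbol for each single-stream MCS (0-7),
 * for 20 and 40 MHz channels; scaled by the stream count when packet
 * durations and minimum subframe lengths are computed below.
 */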
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
                                           struct ath_txq *txq,
                                           struct ath_atx_tid *tid,
                                           struct sk_buff *skb,
                                           bool dequeue);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

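/*
 * Maximum frame length (bytes, capped at 65532) that still fits within
 * a 4ms transmit duration, indexed by HT mode and MCS index; used to
 * bound aggregate size in ath_lookup_rate().
 */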
static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
                6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
                9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
                12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
                7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
                10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
                14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
                13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
                20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
                26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
                14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
                22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
                29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
        }
};

/*********************/
/* Aggregation logic */
/*********************/

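/*
 * TX queue lock helpers: ath_txq_unlock_complete() also hands frames
 * queued on txq->complete_q back to mac80211 after dropping the lock,
 * so tx status processing never runs under axq_lock.
 */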
static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
        __acquires(&txq->axq_lock)
{
        spin_lock_bh(&txq->axq_lock);
}

static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        spin_unlock_bh(&txq->axq_lock);
}

static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
{
        struct sk_buff_head q;
        struct sk_buff *skb;

        __skb_queue_head_init(&q);
        skb_queue_splice_init(&txq->complete_q, &q);
        spin_unlock_bh(&txq->axq_lock);

        while ((skb = __skb_dequeue(&q)))
                ieee80211_tx_status(sc->hw, skb);
}

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        ath_txq_lock(sc, txq);
        tid->paused = false;

        if (skb_queue_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        ath_txq_unlock_complete(sc, txq);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
        ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
                           seqno << IEEE80211_SEQ_SEQ_SHIFT);
}

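/*
 * Drain the software queue of a TID: already-retried subframes are
 * completed as failed and removed from the block-ack window, untouched
 * ones are transmitted as normal (non-aggregated) frames.
 */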
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;
        bool sendbar = false;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        sendbar = true;
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
        }

        if (tid->baw_head == tid->baw_tail) {
                tid->state &= ~AGGR_ADDBA_COMPLETE;
                tid->state &= ~AGGR_CLEANUP;
        }

        if (sendbar) {
                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, tid->seq_start);
                ath_txq_lock(sc, txq);
        }
}

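/*
 * Block-ack window tracking: tid->tx_buf is a bitmap of outstanding
 * subframes relative to seq_start. Completing the frame at the head of
 * the window lets seq_start advance past all completed sequence numbers.
 */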
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
                if (tid->bar_index >= 0)
                        tid->bar_index--;
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)

{
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;

                if (!bf) {
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
                        continue;
                }

                list_add_tail(&bf->list, &bf_head);

                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
        tid->bar_index = -1;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb, int count)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf = fi->bf;
        struct ieee80211_hdr *hdr;
        int prev = fi->retries;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        fi->retries += count;

        if (prev > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                   sizeof(*hdr), DMA_TO_DEVICE);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

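/*
 * Clone a tx buffer so that a stale (still hardware-held) holding
 * descriptor can be retransmitted; the clone shares the skb, the DMA
 * mapping and the buffer state of the original.
 */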
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

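/*
 * Count the subframes of an aggregate, using the block-ack bitmap from
 * the tx status to determine how many of them went unacknowledged.
 */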
static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

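/*
 * Handle the tx status of an aggregate: complete the subframes covered
 * by the block ack, retransmit or fail the remainder, and send a BAR
 * when a subframe had to be dropped from the block-ack window.
 */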
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
        int i, retries;
        int bar_index = -1;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        retries = ts->ts_longretry + 1;
        for (i = 0; i < ts->ts_rateindex; i++)
                retries += rates[i].count;

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        if (!bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);
        seq_first = tid->seq_start;

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted.
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when BA
                         * issue happens. Chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing an internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        __skb_queue_head_init(&bf_pending);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                u16 seqno = bf->bf_state.seqno;

                txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else if ((tid->state & AGGR_CLEANUP) || !retry) {
                        /*
                         * cleanup in progress, just fail
                         * the un-acked sub-frames
                         */
                        txfail = 1;
                } else if (flush) {
                        txpending = 1;
                } else if (fi->retries < ATH_MAX_SW_RETRIES) {
                        if (txok || !an->sleeping)
                                ath_tx_set_retry(sc, txq, bf->bf_mpdu,
                                                 retries);

                        txpending = 1;
                } else {
                        txfail = 1;
                        txfail_cnt++;
                        bar_index = max_t(int, bar_index,
                                ATH_BA_INDEX(seq_first, seqno));
                }

                /*
                 * Make sure the last desc is reclaimed if it
                 * is not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
                if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                    bf_next != NULL || !bf_last->bf_stale)
                        list_move_tail(&bf->list, &bf_head);

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        ath_tx_update_baw(sc, tid, seqno);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
                                rc_update = false;
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                !txfail);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                            bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;

                                tbf = ath_clone_txbuf(sc, bf_last);
                                /*
                                 * Update tx baw and complete the
                                 * frame with failed status if we
                                 * run out of tx buf.
                                 */
                                if (!tbf) {
                                        ath_tx_update_baw(sc, tid, seqno);

                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0);
                                        bar_index = max_t(int, bar_index,
                                                ATH_BA_INDEX(seq_first, seqno));
                                        break;
                                }

                                fi->bf = tbf;
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        __skb_queue_tail(&bf_pending, skb);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!skb_queue_empty(&bf_pending)) {
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);

                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);

                        if (ts->ts_status & ATH9K_TXERR_FILT)
                                tid->ac->clear_ps_filter = true;
                }
        }

        if (bar_index >= 0) {
                u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

                if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
                        tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

                ath_txq_unlock(sc, txq);
                ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
                ath_txq_lock(sc, txq);
        }

        if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);

        rcu_read_unlock();

        if (needreset) {
                RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
                ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
        }
}

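/*
 * Returns true if any valid rate series of the frame falls back to a
 * legacy (non-MCS) rate, in which case it must not be aggregated.
 */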
static bool ath_lookup_legacy(struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || rates[i].idx < 0)
                        break;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
                        return true;
        }

        return false;
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, bt_aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                int modeidx;

                if (!rates[i].count)
                        continue;

                if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                        legacy = 1;
                        break;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        modeidx = MCS_HT40;
                else
                        modeidx = MCS_HT20;

                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        modeidx++;

                frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                max_4ms_framelen = min(max_4ms_framelen, frmlen);
        }

        /*
         * limit aggregate size by the minimum rate if rate selected is
         * not a probe rate, if rate selected is a probe rate then
         * avoid aggregation of this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * Override the default aggregation limit for BTCOEX.
         */
        bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
        if (bt_aggr_limit)
                aggr_limit = bt_aggr_limit;

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen,
                                  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption enabled, hardware requires some more padding between
         * subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
            !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Add delimiter when using RTS/CTS with aggregation
         * and non-enterprise AR9003 cards
         */
        if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
            (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
                ndelim = max(ndelim, FIRST_DESC_NDELIMS);

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}

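/*
 * Pull frames off the TID's software queue and chain them into a
 * single aggregate, stopping at the edge of the block-ack window, at
 * the rate-derived length limit or at the subframe limit.
 */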
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        u16 seqno;

        do {
                skb = skb_peek(&tid->buf_q);
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);

                if (!bf)
                        continue;

                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
                        struct ath_tx_status ts = {};
                        struct list_head bf_head;

                        INIT_LIST_HEAD(&bf_head);
                        list_add(&bf->list, &bf_head);
                        __skb_unlink(skb, &tid->buf_q);
                        ath_tx_update_baw(sc, tid, seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        continue;
                }

                if (!bf_first)
                        bf_first = bf;

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
                     ath_lookup_legacy(bf))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
                                                !nframes);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                nframes++;
                bf->bf_next = NULL;

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, seqno);
                bf->bf_state.ndelim = ndelim;

                __skb_unlink(skb, &tid->buf_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;

                bf_prev = bf;

        } while (!skb_queue_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}

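/*
 * Fill in the rate series (rate code, RTS/CTS flags, chainmask and
 * on-air duration) of the tx descriptor info, for MCS and legacy rates.
 */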
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        int i;
        u8 rix = 0;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
        info->rtscts_rate = fi->rtscts_rate;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;

                if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        info->rates[i].Rate = rix | 0x80;
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);
                        info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                 is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                info->rates[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                if (bf->bf_state.bfs_paprd)
                        info->rates[i].ChSel = ah->txchainmask;
                else
                        info->rates[i].ChSel = ath_txchainmask_reduction(sc,
                                        ah->txchainmask, info->rates[i].Rate);

                info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                info->flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (info->flags & ATH9K_TXDESC_RTSENA)
                info->flags &= ~ATH9K_TXDESC_CTSENA;
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

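/*
 * Program the hardware descriptors for a frame or a chain of A-MPDU
 * subframes, linking each descriptor to the next and marking every
 * subframe as first/middle/last within the aggregate.
 */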
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
        struct ath_buf *bf_first = bf;
        struct ath_tx_info info;
        bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

        memset(&info, 0, sizeof(info));
        info.is_first = true;
        info.is_last = true;
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;

        info.flags = ATH9K_TXDESC_INTREQ;
        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                info.flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                info.flags |= ATH9K_TXDESC_LDPC;

        ath_buf_set_rate(sc, bf, &info, len);

        if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                info.flags |= ATH9K_TXDESC_CLRDMASK;

        if (bf->bf_state.bfs_paprd)
                info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;

        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
                struct ath_frame_info *fi = get_frame_info(skb);

                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
                        info.link = 0;

                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
                info.keyix = fi->keyix;
                info.keytype = fi->keytype;

                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
                        else if (!bf->bf_next)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;

                        info.ndelim = bf->bf_state.ndelim;
                        info.aggr_len = len;
                }

                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ieee80211_tx_info *tx_info;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (skb_queue_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

                if (tid->ac->clear_ps_filter) {
                        tid->ac->clear_ps_filter = false;
                        tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
                } else {
                        tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
                }

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
                        bf->bf_state.bf_type = BUF_AMPDU;
                } else {
                        TX_STAT_INC(txq->axq_qnum, a_aggr);
                }

                ath_tx_fill_desc(sc, bf, txq, aggr_len);
                ath_tx_txqaddbuf(sc, txq, &bf_q, false);
        } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

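/*
 * mac80211 ADDBA hook: reset the TID's sequence window and bitmap and
 * report the starting sequence number. Fails with -EAGAIN while a
 * previous session still exists or is being torn down.
 */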
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
        txtid->bar_index = -1;

        memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
        txtid->baw_head = txtid->baw_tail = 0;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        ath_txq_lock(sc, txq);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;

        ath_tx_flush_tid(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
}

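/*
 * Power-save transitions: on sleep, unschedule all TIDs of the node and
 * tell mac80211 which of them still hold buffered frames; on wakeup,
 * requeue them and kick the queues again.
 */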
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                if (!tid->sched)
                        continue;

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);

                buffered = !skb_queue_empty(&tid->buf_q);

                tid->sched = false;
                list_del(&tid->list);

                if (ac->sched) {
                        ac->sched = false;
                        list_del(&ac->list);
                }

                ath_txq_unlock(sc, txq);

                ieee80211_sta_set_buffered(sta, tidno, buffered);
        }
}

void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_atx_tid *tid;
        struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;

        for (tidno = 0, tid = &an->tid[tidno];
             tidno < WME_NUM_TID; tidno++, tid++) {

                ac = tid->ac;
                txq = ac->txq;

                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;

                if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }

                ath_txq_unlock_complete(sc, txq);
        }
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        txtid = ATH_AN_2_TID(an, tid);
        txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
        txtid->state |= AGGR_ADDBA_COMPLETE;
        txtid->state &= ~AGGR_ADDBA_PROGRESS;
        ath_tx_resume_tid(sc, txtid);
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

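/*
 * Allocate a hardware tx queue of the requested type and bind it to an
 * ath_txq; returns NULL when the hardware has no suitable queue left.
 */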
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int axq_qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise waiting for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
                struct ath_txq *txq = &sc->tx.txq[axq_qnum];

                txq->axq_qnum = axq_qnum;
                txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                __skb_queue_head_init(&txq->complete_q);
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<axq_qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
        }
        return &sc->tx.txq[axq_qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_err(ath9k_hw_common(sc->sc_ah),
                        "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (cur_conf->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}

static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
        return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *list, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        ts.ts_status = ATH9K_TX_FLUSH;
        INIT_LIST_HEAD(&bf_head);

        while (!list_empty(list)) {
                bf = list_first_entry(list, struct ath_buf, list);

                if (bf->bf_stale) {
                        list_del(&bf->list);

                        ath_tx_return_buffer(sc, bf);
                        continue;
                }

                lastbf = bf->bf_lastbf;
                list_cut_position(&bf_head, list, &lastbf->list);

                txq->axq_depth--;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        ath_txq_lock(sc, txq);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                int idx = txq->txq_tailidx;

                while (!list_empty(&txq->txq_fifo[idx])) {
                        ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
                                           retry_tx);

                        INCR(idx, ATH_TXFIFO_DEPTH);
                }
                txq->txq_tailidx = idx;
        }

        txq->axq_link = NULL;
        txq->axq_tx_inprogress = false;
        ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

        /* flush any pending frames if aggregation is enabled */
        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
                ath_txq_drain_pending_buffers(sc, txq);

        ath_txq_unlock_complete(sc, txq);
}

bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i;
        u32 npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return true;

        ath9k_hw_abort_tx_dma(ah);

        /* Check if any queue remains active */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
                        npend |= BIT(i);
        }

        if (npend)
                ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                /*
                 * The caller will resume queues with ieee80211_wake_queues.
                 * Mark the queue as not stopped to prevent ath_tx_complete
                 * from waking the queue too early.
                 */
                txq = &sc->tx.txq[i];
                txq->stopped = false;
                ath_draintxq(sc, txq, retry_tx);
        }

        return !npend;
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp, *last_ac;
        struct ath_atx_tid *tid, *last_tid;

        if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = false;

                while (!list_empty(&ac->tid_q)) {
                        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
                                               list);
                        list_del(&tid->list);
                        tid->sched = false;

                        if (tid->paused)
                                continue;

                        ath_tx_sched_aggr(sc, txq, tid);

                        /*
                         * add tid to round-robin queue if more frames
                         * are pending for the tid
                         */
                        if (!skb_queue_empty(&tid->buf_q))
                                ath_tx_queue_tid(txq, tid);

                        if (tid == last_tid ||
                            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                                break;
                }

                if (!list_empty(&ac->tid_q) && !ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }

                if (ac == last_ac ||
                    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                        return;
        }
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf, *bf_last;
        bool puttxbuf = false;
        bool edma;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        bf = list_first_entry(head, struct ath_buf, list);
        bf_last = list_entry(head->prev, struct ath_buf, list);

        ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
                txq->axq_qnum, txq->axq_depth);

        if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
                list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                puttxbuf = true;
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link) {
                        ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
                        ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
                                txq->axq_qnum, txq->axq_link,
                                ito64(bf->bf_daddr), bf->bf_desc);
                } else if (!edma)
                        puttxbuf = true;

                txq->axq_link = bf_last->bf_desc;
        }

        if (puttxbuf) {
                TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        }

        if (!edma) {
                TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }

        if (!internal) {
                txq->axq_depth++;
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth++;
        }
}

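/*
 * Queue an aggregation-eligible frame: it stays on the TID's software
 * queue whenever immediate transmission is not possible, otherwise it
 * is sent to the hardware as a single-subframe A-MPDU.
 */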
e8324357 1692static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
44f1d26c 1693 struct sk_buff *skb, struct ath_tx_control *txctl)
f078f209 1694{
44f1d26c 1695 struct ath_frame_info *fi = get_frame_info(skb);
04caf863 1696 struct list_head bf_head;
44f1d26c 1697 struct ath_buf *bf;
f078f209 1698
e8324357
S
1699 /*
1700 * Do not queue to h/w when any of the following conditions is true:
1701 * - there are pending frames in software queue
1702 * - the TID is currently paused for ADDBA/BAR request
1703 * - seqno is not within block-ack window
1704 * - h/w queue depth exceeds low water mark
1705 */
56dc6336 1706 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
44f1d26c 1707 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
4b3ba66a 1708 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
f078f209 1709 /*
e8324357
S
 1710 * Add this frame to the software queue, to be scheduled
 1711 * later for aggregation.
f078f209 1712 */
bda8adda 1713 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
44f1d26c 1714 __skb_queue_tail(&tid->buf_q, skb);
9af73cf7
FF
1715 if (!txctl->an || !txctl->an->sleeping)
1716 ath_tx_queue_tid(txctl->txq, tid);
e8324357
S
1717 return;
1718 }
1719
81357a28 1720 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
44f1d26c
FF
1721 if (!bf)
1722 return;
1723
399c6489 1724 bf->bf_state.bf_type = BUF_AMPDU;
04caf863
FF
1725 INIT_LIST_HEAD(&bf_head);
1726 list_add(&bf->list, &bf_head);
1727
e8324357 1728 /* Add sub-frame to BAW */
44f1d26c 1729 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
e8324357
S
1730
1731 /* Queue to h/w without aggregation */
bda8adda 1732 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
d43f3015 1733 bf->bf_lastbf = bf;
493cf04f 1734 ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
fce041be 1735 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
e8324357
S
1736}
1737
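/*
 * Worked example (standalone, not driver code): the BAW_WITHIN() check in
 * ath_tx_send_ampdu() above decides whether a sequence number falls inside
 * the block-ack window; 802.11 sequence numbers are 12 bits, so the test
 * is done modulo 4096. A sketch assuming that modulo-4096 definition:
 */
#include <assert.h>

#define MODEL_BAW_WITHIN(start, bawsz, seqno) \
        ((((seqno) - (start)) & 4095) < (bawsz))

int main(void)
{
        /* a window of 64 frames starting at 4090 wraps past 4095 */
        assert(MODEL_BAW_WITHIN(4090, 64, 4095));       /* inside */
        assert(MODEL_BAW_WITHIN(4090, 64, 10));         /* inside, wrapped */
        assert(!MODEL_BAW_WITHIN(4090, 64, 60));        /* beyond the window */
        return 0;
}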
82b873af 1738static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c 1739 struct ath_atx_tid *tid, struct sk_buff *skb)
e8324357 1740{
44f1d26c
FF
1741 struct ath_frame_info *fi = get_frame_info(skb);
1742 struct list_head bf_head;
e8324357
S
1743 struct ath_buf *bf;
1744
44f1d26c
FF
1745 bf = fi->bf;
1746 if (!bf)
81357a28 1747 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
44f1d26c
FF
1748
1749 if (!bf)
1750 return;
1751
1752 INIT_LIST_HEAD(&bf_head);
1753 list_add_tail(&bf->list, &bf_head);
399c6489 1754 bf->bf_state.bf_type = 0;
e8324357 1755
d43f3015 1756 bf->bf_lastbf = bf;
493cf04f 1757 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
44f1d26c 1758 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
fec247c0 1759 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1760}
1761
2d42efc4
FF
1762static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1763 int framelen)
e8324357
S
1764{
1765 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1766 struct ieee80211_sta *sta = tx_info->control.sta;
1767 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
80b08a8d 1769 const struct ieee80211_rate *rate;
2d42efc4 1770 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1771 struct ath_node *an = NULL;
2d42efc4 1772 enum ath9k_key_type keytype;
80b08a8d
FF
1773 bool short_preamble = false;
1774
1775 /*
1776 * We check if Short Preamble is needed for the CTS rate by
1777 * checking the BSS's global flag.
1778 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
1779 */
1780 if (tx_info->control.vif &&
1781 tx_info->control.vif->bss_conf.use_short_preamble)
1782 short_preamble = true;
e8324357 1783
80b08a8d 1784 rate = ieee80211_get_rts_cts_rate(hw, tx_info);
2d42efc4 1785 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1786
93ae2dd2
FF
1787 if (sta)
1788 an = (struct ath_node *) sta->drv_priv;
1789
2d42efc4
FF
1790 memset(fi, 0, sizeof(*fi));
1791 if (hw_key)
1792 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1793 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1794 fi->keyix = an->ps_key;
2d42efc4
FF
1795 else
1796 fi->keyix = ATH9K_TXKEYIX_INVALID;
1797 fi->keytype = keytype;
1798 fi->framelen = framelen;
80b08a8d
FF
1799 fi->rtscts_rate = rate->hw_value;
1800 if (short_preamble)
1801 fi->rtscts_rate |= rate->hw_value_short;
e8324357
S
1802}
1803
ea066d5a
MSS
1804u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1805{
1806 struct ath_hw *ah = sc->sc_ah;
1807 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1808 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1809 (curchan->channelFlags & CHANNEL_5GHZ) &&
1810 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1811 return 0x3;
1812 else
1813 return chainmask;
1814}
1815
44f1d26c
FF
1816/*
 1817 * Assign a descriptor (and a sequence number if necessary)
 1818 * and map the buffer for DMA. Frees the skb on error.
1819 */
fa05f87a 1820static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
04caf863 1821 struct ath_txq *txq,
fa05f87a 1822 struct ath_atx_tid *tid,
81357a28
FF
1823 struct sk_buff *skb,
1824 bool dequeue)
f078f209 1825{
82b873af 1826 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2d42efc4 1827 struct ath_frame_info *fi = get_frame_info(skb);
fa05f87a 1828 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
82b873af 1829 struct ath_buf *bf;
fd09c85f 1830 int fragno;
fa05f87a 1831 u16 seqno;
82b873af
FF
1832
1833 bf = ath_tx_get_buffer(sc);
1834 if (!bf) {
d2182b69 1835 ath_dbg(common, XMIT, "TX buffers are full\n");
44f1d26c 1836 goto error;
82b873af 1837 }
e022edbd 1838
528f0c6b 1839 ATH_TXBUF_RESET(bf);
f078f209 1840
fa05f87a 1841 if (tid) {
fd09c85f 1842 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
fa05f87a
FF
1843 seqno = tid->seq_next;
1844 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
fd09c85f
SM
1845
1846 if (fragno)
1847 hdr->seq_ctrl |= cpu_to_le16(fragno);
1848
1849 if (!ieee80211_has_morefrags(hdr->frame_control))
1850 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1851
fa05f87a
FF
1852 bf->bf_state.seqno = seqno;
1853 }
1854
f078f209 1855 bf->bf_mpdu = skb;
f8316df1 1856
c1739eb3
BG
1857 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1858 skb->len, DMA_TO_DEVICE);
1859 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
f8316df1 1860 bf->bf_mpdu = NULL;
6cf9e995 1861 bf->bf_buf_addr = 0;
3800276a
JP
1862 ath_err(ath9k_hw_common(sc->sc_ah),
1863 "dma_mapping_error() on TX\n");
82b873af 1864 ath_tx_return_buffer(sc, bf);
44f1d26c 1865 goto error;
f8316df1
LR
1866 }
1867
56dc6336 1868 fi->bf = bf;
04caf863
FF
1869
1870 return bf;
44f1d26c
FF
1871
1872error:
81357a28
FF
1873 if (dequeue)
1874 __skb_unlink(skb, &tid->buf_q);
44f1d26c
FF
1875 dev_kfree_skb_any(skb);
1876 return NULL;
04caf863
FF
1877}
1878
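/*
 * Worked example (standalone, not driver code): the tid block above packs
 * the 12-bit sequence number and 4-bit fragment number into the 16-bit
 * seq_ctrl field (seqno << IEEE80211_SEQ_SEQ_SHIFT | fragno). A sketch
 * with plain integers:
 */
#include <assert.h>
#include <stdint.h>

static uint16_t pack_seq_ctrl(uint16_t seqno, uint16_t fragno)
{
        return (uint16_t)((seqno << 4) | (fragno & 0x000F));
}

int main(void)
{
        assert(pack_seq_ctrl(2048, 0) == 0x8000);
        assert(pack_seq_ctrl(1, 3) == 0x0013);

        /* seq_next wraps modulo 4096, mirroring INCR(..., IEEE80211_SEQ_MAX) */
        uint16_t seq_next = 4095;
        seq_next = (seq_next + 1) % 4096;
        assert(seq_next == 0);
        return 0;
}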
1879/* FIXME: tx power */
44f1d26c 1880static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
04caf863
FF
1881 struct ath_tx_control *txctl)
1882{
04caf863
FF
1883 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1884 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
248a38d0 1885 struct ath_atx_tid *tid = NULL;
fa05f87a 1886 struct ath_buf *bf;
04caf863 1887 u8 tidno;
f078f209 1888
3d4e20f2 1889 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
61e1b0b0 1890 ieee80211_is_data_qos(hdr->frame_control)) {
5daefbd0
FF
1891 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1892 IEEE80211_QOS_CTL_TID_MASK;
2d42efc4 1893 tid = ATH_AN_2_TID(txctl->an, tidno);
5daefbd0 1894
066dae93 1895 WARN_ON(tid->ac->txq != txctl->txq);
248a38d0
FF
1896 }
1897
1898 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
04caf863
FF
1899 /*
1900 * Try aggregation if it's a unicast data frame
1901 * and the destination is HT capable.
1902 */
44f1d26c 1903 ath_tx_send_ampdu(sc, tid, skb, txctl);
f078f209 1904 } else {
81357a28 1905 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
44f1d26c 1906 if (!bf)
3ad29529 1907 return;
04caf863 1908
82b873af
FF
1909 bf->bf_state.bfs_paprd = txctl->paprd;
1910
9cf04dcc
MSS
1911 if (txctl->paprd)
1912 bf->bf_state.bfs_paprd_timestamp = jiffies;
1913
44f1d26c 1914 ath_tx_send_normal(sc, txctl->txq, tid, skb);
f078f209 1915 }
f078f209
LR
1916}
1917
f8316df1 1918/* Upon failure caller should free skb */
c52f33d0 1919int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1920 struct ath_tx_control *txctl)
f078f209 1921{
28d16708
FF
1922 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1923 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1924 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1925 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1926 struct ath_softc *sc = hw->priv;
84642d6b 1927 struct ath_txq *txq = txctl->txq;
4d91f9f3 1928 int padpos, padsize;
04caf863 1929 int frmlen = skb->len + FCS_LEN;
28d16708 1930 int q;
f078f209 1931
a9927ba3
BG
1932 /* NOTE: sta can be NULL according to net/mac80211.h */
1933 if (sta)
1934 txctl->an = (struct ath_node *)sta->drv_priv;
1935
04caf863
FF
1936 if (info->control.hw_key)
1937 frmlen += info->control.hw_key->icv_len;
1938
f078f209 1939 /*
e8324357
S
1940 * As a temporary workaround, assign seq# here; this will likely need
1941 * to be cleaned up to work better with Beacon transmission and virtual
1942 * BSSes.
f078f209 1943 */
e8324357 1944 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1945 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1946 sc->tx.seq_no += 0x10;
1947 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1948 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1949 }
f078f209 1950
42cecc34
JL
1951 /* Add the padding after the header if this is not already done */
1952 padpos = ath9k_cmn_padpos(hdr->frame_control);
1953 padsize = padpos & 3;
1954 if (padsize && skb->len > padpos) {
1955 if (skb_headroom(skb) < padsize)
1956 return -ENOMEM;
28d16708 1957
42cecc34
JL
1958 skb_push(skb, padsize);
1959 memmove(skb->data, skb->data + padsize, padpos);
6e82bc4a 1960 hdr = (struct ieee80211_hdr *) skb->data;
f078f209 1961 }
f078f209 1962
f59a59fe
FF
1963 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1964 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1965 !ieee80211_is_data(hdr->frame_control))
1966 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1967
2d42efc4
FF
1968 setup_frame_info(hw, skb, frmlen);
1969
1970 /*
1971 * At this point, the vif, hw_key and sta pointers in the tx control
 1972 * info are no longer valid (overwritten by the ath_frame_info data).
1973 */
1974
28d16708 1975 q = skb_get_queue_mapping(skb);
23de5dc9
FF
1976
1977 ath_txq_lock(sc, txq);
28d16708
FF
1978 if (txq == sc->tx.txq_map[q] &&
1979 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1980 ieee80211_stop_queue(sc->hw, q);
3db1cd5c 1981 txq->stopped = true;
f078f209 1982 }
f078f209 1983
44f1d26c 1984 ath_tx_start_dma(sc, skb, txctl);
3ad29529 1985
23de5dc9 1986 ath_txq_unlock(sc, txq);
3ad29529 1987
44f1d26c 1988 return 0;
f078f209
LR
1989}
1990
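/*
 * Worked example (standalone, not driver code): the padding logic in
 * ath_tx_start() above 4-byte aligns the frame body by inserting
 * padsize = padpos & 3 bytes after the 802.11 header, where padpos is the
 * header length. Assuming a 26-byte QoS data header:
 */
#include <assert.h>

int main(void)
{
        int padpos = 26;        /* QoS data: 24-byte header + 2-byte QoS ctl */
        int padsize = padpos & 3;       /* bytes needed for 4-byte alignment */

        assert(padsize == 2);
        assert((padpos + padsize) % 4 == 0);

        /* a 24-byte non-QoS header is already aligned: no padding needed */
        assert((24 & 3) == 0);
        return 0;
}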
e8324357
S
1991/*****************/
1992/* TX Completion */
1993/*****************/
528f0c6b 1994
e8324357 1995static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1996 int tx_flags, struct ath_txq *txq)
528f0c6b 1997{
e8324357 1998 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1999 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 2000 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
97923b14 2001 int q, padpos, padsize;
528f0c6b 2002
d2182b69 2003 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 2004
55797b1a 2005 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
2006 /* Frame was ACKed */
2007 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 2008
42cecc34
JL
2009 padpos = ath9k_cmn_padpos(hdr->frame_control);
2010 padsize = padpos & 3;
 2011 if (padsize && skb->len > padpos + padsize) {
2012 /*
2013 * Remove MAC header padding before giving the frame back to
2014 * mac80211.
2015 */
2016 memmove(skb->data + padsize, skb->data, padpos);
2017 skb_pull(skb, padsize);
e8324357 2018 }
528f0c6b 2019
c8e8868e 2020 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
1b04b930 2021 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
d2182b69 2022 ath_dbg(common, PS,
226afe68 2023 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
2024 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2025 PS_WAIT_FOR_CAB |
2026 PS_WAIT_FOR_PSPOLL_DATA |
2027 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
2028 }
2029
7545daf4
FF
2030 q = skb_get_queue_mapping(skb);
2031 if (txq == sc->tx.txq_map[q]) {
7545daf4
FF
2032 if (WARN_ON(--txq->pending_frames < 0))
2033 txq->pending_frames = 0;
92460412 2034
7545daf4
FF
2035 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2036 ieee80211_wake_queue(sc->hw, q);
3db1cd5c 2037 txq->stopped = false;
066dae93 2038 }
97923b14 2039 }
7545daf4 2040
23de5dc9 2041 __skb_queue_tail(&txq->complete_q, skb);
e8324357 2042}
f078f209 2043
e8324357 2044static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 2045 struct ath_txq *txq, struct list_head *bf_q,
156369fa 2046 struct ath_tx_status *ts, int txok)
f078f209 2047{
e8324357 2048 struct sk_buff *skb = bf->bf_mpdu;
3afd21e7 2049 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
e8324357 2050 unsigned long flags;
6b2c4032 2051 int tx_flags = 0;
f078f209 2052
55797b1a 2053 if (!txok)
6b2c4032 2054 tx_flags |= ATH_TX_ERROR;
f078f209 2055
3afd21e7
FF
2056 if (ts->ts_status & ATH9K_TXERR_FILT)
2057 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2058
c1739eb3 2059 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 2060 bf->bf_buf_addr = 0;
9f42c2b6
FF
2061
2062 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
2063 if (time_after(jiffies,
2064 bf->bf_state.bfs_paprd_timestamp +
2065 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2066 dev_kfree_skb_any(skb);
78a18172 2067 else
ca369eb4 2068 complete(&sc->paprd_complete);
9f42c2b6 2069 } else {
55797b1a 2070 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2071 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2072 }
6cf9e995
BG
2073 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2074 * accidentally reference it later.
2075 */
2076 bf->bf_mpdu = NULL;
e8324357
S
2077
2078 /*
 2079 * Return the list of ath_buf for this mpdu to the free queue
2080 */
2081 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2082 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2083 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2084}
2085
0cdd5c60
FF
2086static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2087 struct ath_tx_status *ts, int nframes, int nbad,
3afd21e7 2088 int txok)
f078f209 2089{
a22be22a 2090 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 2091 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 2092 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
0cdd5c60 2093 struct ieee80211_hw *hw = sc->hw;
f0c255a0 2094 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 2095 u8 i, tx_rateindex;
f078f209 2096
95e4acb7 2097 if (txok)
db1a052b 2098 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 2099
db1a052b 2100 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
2101 WARN_ON(tx_rateindex >= hw->max_rates);
2102
3afd21e7 2103 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
d969847c 2104 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 2105
b572d033 2106 BUG_ON(nbad > nframes);
ebd02287 2107 }
185d1589
RM
2108 tx_info->status.ampdu_len = nframes;
2109 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287 2110
db1a052b 2111 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
3afd21e7 2112 (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
f0c255a0
FF
2113 /*
 2114 * If an underrun error is seen, treat it as an excessive
 2115 * retry, but only if the max frame trigger level has been
 2116 * reached (2 KB for single stream, 4 KB for dual stream).
 2117 * Adjust the long retry as if the frame had been tried
 2118 * hw->max_rate_tries times, to affect how rate control
 2119 * updates PER for the failed rate.
 2120 * In case of congestion on the bus, penalizing this type of
 2121 * underrun should help the hardware actually transmit new
 2122 * frames successfully by eventually preferring slower rates,
 2123 * which should in turn also alleviate the bus congestion.
2124 */
3afd21e7
FF
2125 if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2126 ATH9K_TX_DELIM_UNDERRUN)) &&
2127 ieee80211_is_data(hdr->frame_control) &&
83860c59 2128 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
f0c255a0
FF
2129 tx_info->status.rates[tx_rateindex].count =
2130 hw->max_rate_tries;
f078f209 2131 }
8a92e2ee 2132
545750d3 2133 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
8a92e2ee 2134 tx_info->status.rates[i].count = 0;
545750d3
FF
2135 tx_info->status.rates[i].idx = -1;
2136 }
8a92e2ee 2137
78c4653a 2138 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
f078f209
LR
2139}
2140
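/*
 * Illustrative sketch (standalone, not driver code): in ath_tx_rc_status()
 * above, the rate-table entries past the rate that was finally used are
 * invalidated (count = 0, idx = -1) and the final rate's count is set to
 * the long retry count + 1. A user-space model with a hypothetical rate
 * struct:
 */
#include <assert.h>

#define MAX_RATES 4

struct model_rate { int idx; int count; };

static void model_rc_status(struct model_rate rates[], int tx_rateindex,
                            int longretry)
{
        /* rates after the final one were never tried: mark them unused */
        for (int i = tx_rateindex + 1; i < MAX_RATES; i++) {
                rates[i].count = 0;
                rates[i].idx = -1;
        }
        rates[tx_rateindex].count = longretry + 1;
}

int main(void)
{
        struct model_rate rates[MAX_RATES] = { {7, 2}, {5, 2}, {3, 2}, {1, 2} };

        model_rc_status(rates, 1, 0);   /* succeeded on the second rate */
        assert(rates[1].count == 1);
        assert(rates[2].idx == -1 && rates[3].idx == -1);
        return 0;
}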
fce041be
FF
2141static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2142 struct ath_tx_status *ts, struct ath_buf *bf,
2143 struct list_head *bf_head)
2144{
2145 int txok;
2146
2147 txq->axq_depth--;
2148 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
2149 txq->axq_tx_inprogress = false;
2150 if (bf_is_ampdu_not_probing(bf))
2151 txq->axq_ampdu_depth--;
2152
fce041be 2153 if (!bf_isampdu(bf)) {
3afd21e7 2154 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
156369fa 2155 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
fce041be
FF
2156 } else
2157 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2158
3d4e20f2 2159 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
fce041be
FF
2160 ath_txq_schedule(sc, txq);
2161}
2162
e8324357 2163static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2164{
cbe61d8a 2165 struct ath_hw *ah = sc->sc_ah;
c46917bb 2166 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2167 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2168 struct list_head bf_head;
e8324357 2169 struct ath_desc *ds;
29bffa96 2170 struct ath_tx_status ts;
e8324357 2171 int status;
f078f209 2172
d2182b69 2173 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
226afe68
JP
2174 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2175 txq->axq_link);
f078f209 2176
23de5dc9 2177 ath_txq_lock(sc, txq);
f078f209 2178 for (;;) {
236de514
FF
2179 if (work_pending(&sc->hw_reset_work))
2180 break;
2181
f078f209
LR
2182 if (list_empty(&txq->axq_q)) {
2183 txq->axq_link = NULL;
3d4e20f2 2184 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
082f6536 2185 ath_txq_schedule(sc, txq);
f078f209
LR
2186 break;
2187 }
f078f209
LR
2188 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2189
e8324357
S
2190 /*
 2191 * There is a race condition in which a BH gets scheduled
 2192 * after sw writes TxE and before hw reloads the last
 2193 * descriptor to pick up the newly chained one.
 2194 * Software must keep the last DONE descriptor as a
 2195 * holding descriptor - software does so by marking
 2196 * it with the STALE flag.
2197 */
2198 bf_held = NULL;
a119cc49 2199 if (bf->bf_stale) {
e8324357 2200 bf_held = bf;
fce041be 2201 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2202 break;
fce041be
FF
2203
2204 bf = list_entry(bf_held->list.next, struct ath_buf,
2205 list);
f078f209
LR
2206 }
2207
2208 lastbf = bf->bf_lastbf;
e8324357 2209 ds = lastbf->bf_desc;
f078f209 2210
29bffa96
FF
2211 memset(&ts, 0, sizeof(ts));
2212 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2213 if (status == -EINPROGRESS)
e8324357 2214 break;
fce041be 2215
2dac4fb9 2216 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2217
e8324357
S
2218 /*
2219 * Remove ath_buf's of the same transmit unit from txq,
2220 * however leave the last descriptor back as the holding
2221 * descriptor for hw.
2222 */
a119cc49 2223 lastbf->bf_stale = true;
e8324357 2224 INIT_LIST_HEAD(&bf_head);
e8324357
S
2225 if (!list_is_singular(&lastbf->list))
2226 list_cut_position(&bf_head,
2227 &txq->axq_q, lastbf->list.prev);
f078f209 2228
fce041be 2229 if (bf_held) {
0a8cea84 2230 list_del(&bf_held->list);
0a8cea84 2231 ath_tx_return_buffer(sc, bf_held);
e8324357 2232 }
f078f209 2233
fce041be 2234 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2235 }
23de5dc9 2236 ath_txq_unlock_complete(sc, txq);
8469cdef
S
2237}
2238
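/*
 * Illustrative sketch (standalone, not driver code): the STALE/holding-
 * descriptor scheme in ath_tx_processq() above leaves the last completed
 * descriptor on the queue so the hardware can still follow its link field,
 * and reclaims it on the next pass. A user-space model of that reclaim
 * order:
 */
#include <stdbool.h>
#include <stdio.h>

struct model_buf { int id; bool stale; struct model_buf *next; };

static struct model_buf *process_head(struct model_buf *head)
{
        if (head->stale) {
                /* the previous pass left this as the holding descriptor */
                printf("reclaim holding desc %d\n", head->id);
                head = head->next;
        }
        /* complete this buffer, but keep it queued as the new holder */
        printf("complete desc %d (kept as holder)\n", head->id);
        head->stale = true;
        return head;
}

int main(void)
{
        struct model_buf b2 = { 2, false, NULL };
        struct model_buf b1 = { 1, false, &b2 };
        struct model_buf *head = &b1;

        head = process_head(head);      /* completes 1, keeps it stale */
        head = process_head(head);      /* reclaims 1, completes 2 */
        return 0;
}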
305fe47f 2239static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2240{
2241 struct ath_softc *sc = container_of(work, struct ath_softc,
2242 tx_complete_work.work);
2243 struct ath_txq *txq;
2244 int i;
2245 bool needreset = false;
60f2d1d5
BG
2246#ifdef CONFIG_ATH9K_DEBUGFS
2247 sc->tx_complete_poll_work_seen++;
2248#endif
164ace38
SB
2249
2250 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2251 if (ATH_TXQ_SETUP(sc, i)) {
2252 txq = &sc->tx.txq[i];
23de5dc9 2253 ath_txq_lock(sc, txq);
164ace38
SB
2254 if (txq->axq_depth) {
2255 if (txq->axq_tx_inprogress) {
2256 needreset = true;
23de5dc9 2257 ath_txq_unlock(sc, txq);
164ace38
SB
2258 break;
2259 } else {
2260 txq->axq_tx_inprogress = true;
2261 }
2262 }
23de5dc9 2263 ath_txq_unlock_complete(sc, txq);
164ace38
SB
2264 }
2265
2266 if (needreset) {
d2182b69 2267 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
226afe68 2268 "tx hung, resetting the chip\n");
030d6294 2269 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
236de514 2270 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2271 }
2272
42935eca 2273 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2274 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2275}
2276
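/*
 * Illustrative sketch (standalone, not driver code): the poll worker above
 * is a two-pass watchdog. Each pass arms axq_tx_inprogress on a non-empty
 * queue, and completion processing clears it; finding the flag still set
 * on the next pass means nothing completed for a whole interval, i.e. a
 * hang. A user-space model:
 */
#include <assert.h>
#include <stdbool.h>

struct model_txq { int depth; bool tx_inprogress; };

static bool poll_once(struct model_txq *txq)
{
        if (txq->depth) {
                if (txq->tx_inprogress)
                        return true;            /* hang detected */
                txq->tx_inprogress = true;      /* arm for the next pass */
        }
        return false;
}

int main(void)
{
        struct model_txq txq = { 3, false };

        assert(!poll_once(&txq));       /* first pass only arms the flag */
        assert(poll_once(&txq));        /* nothing completed: report a hang */
        return 0;
}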
2277
f078f209 2278
e8324357 2279void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2280{
239c795d
FF
2281 struct ath_hw *ah = sc->sc_ah;
2282 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
e8324357 2283 int i;
f078f209 2284
e8324357
S
2285 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2286 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2287 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2288 }
2289}
2290
e5003249
VT
2291void ath_tx_edma_tasklet(struct ath_softc *sc)
2292{
fce041be 2293 struct ath_tx_status ts;
e5003249
VT
2294 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2295 struct ath_hw *ah = sc->sc_ah;
2296 struct ath_txq *txq;
2297 struct ath_buf *bf, *lastbf;
2298 struct list_head bf_head;
2299 int status;
e5003249
VT
2300
2301 for (;;) {
236de514
FF
2302 if (work_pending(&sc->hw_reset_work))
2303 break;
2304
fce041be 2305 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
e5003249
VT
2306 if (status == -EINPROGRESS)
2307 break;
2308 if (status == -EIO) {
d2182b69 2309 ath_dbg(common, XMIT, "Error processing tx status\n");
e5003249
VT
2310 break;
2311 }
2312
4e0ad259
FF
2313 /* Process beacon completions separately */
2314 if (ts.qid == sc->beacon.beaconq) {
2315 sc->beacon.tx_processed = true;
2316 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
e5003249 2317 continue;
4e0ad259 2318 }
e5003249 2319
fce041be 2320 txq = &sc->tx.txq[ts.qid];
e5003249 2321
23de5dc9 2322 ath_txq_lock(sc, txq);
fce041be 2323
e5003249 2324 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
23de5dc9 2325 ath_txq_unlock(sc, txq);
e5003249
VT
2326 return;
2327 }
2328
2329 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2330 struct ath_buf, list);
2331 lastbf = bf->bf_lastbf;
2332
2333 INIT_LIST_HEAD(&bf_head);
2334 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2335 &lastbf->list);
e5003249 2336
fce041be
FF
2337 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2338 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
e5003249 2339
fce041be
FF
2340 if (!list_empty(&txq->axq_q)) {
2341 struct list_head bf_q;
60f2d1d5 2342
fce041be
FF
2343 INIT_LIST_HEAD(&bf_q);
2344 txq->axq_link = NULL;
2345 list_splice_tail_init(&txq->axq_q, &bf_q);
2346 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2347 }
2348 }
86271e46 2349
fce041be 2350 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
23de5dc9 2351 ath_txq_unlock_complete(sc, txq);
e5003249
VT
2352 }
2353}
2354
e8324357
S
2355/*****************/
2356/* Init, Cleanup */
2357/*****************/
f078f209 2358
5088c2f1
VT
2359static int ath_txstatus_setup(struct ath_softc *sc, int size)
2360{
2361 struct ath_descdma *dd = &sc->txsdma;
2362 u8 txs_len = sc->sc_ah->caps.txs_len;
2363
2364 dd->dd_desc_len = size * txs_len;
2365 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2366 &dd->dd_desc_paddr, GFP_KERNEL);
2367 if (!dd->dd_desc)
2368 return -ENOMEM;
2369
2370 return 0;
2371}
2372
2373static int ath_tx_edma_init(struct ath_softc *sc)
2374{
2375 int err;
2376
2377 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2378 if (!err)
2379 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2380 sc->txsdma.dd_desc_paddr,
2381 ATH_TXSTATUS_RING_SIZE);
2382
2383 return err;
2384}
2385
2386static void ath_tx_edma_cleanup(struct ath_softc *sc)
2387{
2388 struct ath_descdma *dd = &sc->txsdma;
2389
2390 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2391 dd->dd_desc_paddr);
2392}
2393
e8324357 2394int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2395{
c46917bb 2396 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2397 int error = 0;
f078f209 2398
797fe5cb 2399 spin_lock_init(&sc->tx.txbuflock);
f078f209 2400
797fe5cb 2401 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2402 "tx", nbufs, 1, 1);
797fe5cb 2403 if (error != 0) {
3800276a
JP
2404 ath_err(common,
2405 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2406 goto err;
2407 }
f078f209 2408
797fe5cb 2409 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2410 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2411 if (error != 0) {
3800276a
JP
2412 ath_err(common,
2413 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2414 goto err;
2415 }
f078f209 2416
164ace38
SB
2417 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2418
5088c2f1
VT
2419 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2420 error = ath_tx_edma_init(sc);
2421 if (error)
2422 goto err;
2423 }
2424
797fe5cb 2425err:
e8324357
S
2426 if (error != 0)
2427 ath_tx_cleanup(sc);
f078f209 2428
e8324357 2429 return error;
f078f209
LR
2430}
2431
797fe5cb 2432void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2433{
2434 if (sc->beacon.bdma.dd_desc_len != 0)
2435 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2436
2437 if (sc->tx.txdma.dd_desc_len != 0)
2438 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2439
2440 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2441 ath_tx_edma_cleanup(sc);
e8324357 2442}
f078f209
LR
2443
2444void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2445{
c5170163
S
2446 struct ath_atx_tid *tid;
2447 struct ath_atx_ac *ac;
2448 int tidno, acno;
f078f209 2449
8ee5afbc 2450 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2451 tidno < WME_NUM_TID;
2452 tidno++, tid++) {
2453 tid->an = an;
2454 tid->tidno = tidno;
2455 tid->seq_start = tid->seq_next = 0;
2456 tid->baw_size = WME_MAX_BA;
2457 tid->baw_head = tid->baw_tail = 0;
2458 tid->sched = false;
e8324357 2459 tid->paused = false;
a37c2c79 2460 tid->state &= ~AGGR_CLEANUP;
56dc6336 2461 __skb_queue_head_init(&tid->buf_q);
c5170163 2462 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2463 tid->ac = &an->ac[acno];
a37c2c79
S
2464 tid->state &= ~AGGR_ADDBA_COMPLETE;
2465 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2466 }
f078f209 2467
8ee5afbc 2468 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2469 acno < WME_NUM_AC; acno++, ac++) {
2470 ac->sched = false;
066dae93 2471 ac->txq = sc->tx.txq_map[acno];
c5170163 2472 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2473 }
2474}
2475
b5aa9bf9 2476void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2477{
2b40994c
FF
2478 struct ath_atx_ac *ac;
2479 struct ath_atx_tid *tid;
f078f209 2480 struct ath_txq *txq;
066dae93 2481 int tidno;
e8324357 2482
2b40994c
FF
2483 for (tidno = 0, tid = &an->tid[tidno];
2484 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2485
2b40994c 2486 ac = tid->ac;
066dae93 2487 txq = ac->txq;
f078f209 2488
23de5dc9 2489 ath_txq_lock(sc, txq);
2b40994c
FF
2490
2491 if (tid->sched) {
2492 list_del(&tid->list);
2493 tid->sched = false;
2494 }
2495
2496 if (ac->sched) {
2497 list_del(&ac->list);
2498 tid->ac->sched = false;
f078f209 2499 }
2b40994c
FF
2500
2501 ath_tid_drain(sc, txq, tid);
2502 tid->state &= ~AGGR_ADDBA_COMPLETE;
2503 tid->state &= ~AGGR_CLEANUP;
2504
23de5dc9 2505 ath_txq_unlock(sc, txq);
f078f209
LR
2506 }
2507}