Merge 3.4-rc6 into usb-next
[deliverable/linux.git] / drivers / net / wireless / ath / ath9k / xmit.c
CommitLineData
f078f209 1/*
5b68138e 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
f078f209
LR
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
b7f080cf 17#include <linux/dma-mapping.h>
394cf0a1 18#include "ath9k.h"
b622a720 19#include "ar9003_mac.h"
f078f209
LR
20
21#define BITS_PER_BYTE 8
22#define OFDM_PLCP_BITS 22
f078f209
LR
23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8
25#define L_LTF 8
26#define L_SIG 4
27#define HT_SIG 8
28#define HT_STF 4
29#define HT_LTF(_ns) (4 * (_ns))
30#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
31#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34
f078f209 35
c6663876 36static u16 bits_per_symbol[][2] = {
f078f209
LR
37 /* 20MHz 40MHz */
38 { 26, 54 }, /* 0: BPSK */
39 { 52, 108 }, /* 1: QPSK 1/2 */
40 { 78, 162 }, /* 2: QPSK 3/4 */
41 { 104, 216 }, /* 3: 16-QAM 1/2 */
42 { 156, 324 }, /* 4: 16-QAM 3/4 */
43 { 208, 432 }, /* 5: 64-QAM 2/3 */
44 { 234, 486 }, /* 6: 64-QAM 3/4 */
45 { 260, 540 }, /* 7: 64-QAM 5/6 */
f078f209
LR
46};
47
48#define IS_HT_RATE(_rate) ((_rate) & 0x80)
49
82b873af 50static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c
FF
51 struct ath_atx_tid *tid, struct sk_buff *skb);
52static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
53 int tx_flags, struct ath_txq *txq);
e8324357 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 55 struct ath_txq *txq, struct list_head *bf_q,
156369fa 56 struct ath_tx_status *ts, int txok);
102e0572 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
fce041be 58 struct list_head *head, bool internal);
0cdd5c60
FF
59static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
60 struct ath_tx_status *ts, int nframes, int nbad,
3afd21e7 61 int txok);
90fa539c
FF
62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno);
44f1d26c
FF
64static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
65 struct ath_txq *txq,
66 struct ath_atx_tid *tid,
67 struct sk_buff *skb);
c4288390 68
545750d3 69enum {
0e668cde
FF
70 MCS_HT20,
71 MCS_HT20_SGI,
545750d3
FF
72 MCS_HT40,
73 MCS_HT40_SGI,
74};
75
0e668cde
FF
76static int ath_max_4ms_framelen[4][32] = {
77 [MCS_HT20] = {
78 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
79 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
80 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
81 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
82 },
83 [MCS_HT20_SGI] = {
84 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
85 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
86 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
87 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
545750d3
FF
88 },
89 [MCS_HT40] = {
0e668cde
FF
90 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
91 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
92 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
93 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
94 },
95 [MCS_HT40_SGI] = {
0e668cde
FF
96 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
97 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
98 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
99 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
545750d3
FF
100 }
101};
102
e8324357
S
103/*********************/
104/* Aggregation logic */
105/*********************/
f078f209 106
/* Acquire the tx queue lock, disabling bottom halves.  The sparse
 * __acquires() annotation documents the lock-held contract for callers. */
static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
	__acquires(&txq->axq_lock)
{
	spin_lock_bh(&txq->axq_lock);
}
112
/* Release the tx queue lock without flushing deferred tx status
 * reports; see ath_txq_unlock_complete() for the flushing variant. */
static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);
}
118
/*
 * Release the tx queue lock, then deliver every tx status report that was
 * deferred onto txq->complete_q while the lock was held.  Splicing onto a
 * local list first keeps ieee80211_tx_status() (which may re-enter the
 * driver) outside the locked region.
 */
static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
	__releases(&txq->axq_lock)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	__skb_queue_head_init(&q);
	skb_queue_splice_init(&txq->complete_q, &q);
	spin_unlock_bh(&txq->axq_lock);

	while ((skb = __skb_dequeue(&q)))
		ieee80211_tx_status(sc->hw, skb);
}
132
e8324357 133static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
ff37e337 134{
e8324357 135 struct ath_atx_ac *ac = tid->ac;
ff37e337 136
e8324357
S
137 if (tid->paused)
138 return;
ff37e337 139
e8324357
S
140 if (tid->sched)
141 return;
ff37e337 142
e8324357
S
143 tid->sched = true;
144 list_add_tail(&tid->list, &ac->tid_q);
528f0c6b 145
e8324357
S
146 if (ac->sched)
147 return;
f078f209 148
e8324357
S
149 ac->sched = true;
150 list_add_tail(&ac->list, &txq->axq_acq);
151}
f078f209 152
e8324357 153static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
f078f209 154{
066dae93 155 struct ath_txq *txq = tid->ac->txq;
e6a9854b 156
75401849 157 WARN_ON(!tid->paused);
f078f209 158
23de5dc9 159 ath_txq_lock(sc, txq);
75401849 160 tid->paused = false;
f078f209 161
56dc6336 162 if (skb_queue_empty(&tid->buf_q))
e8324357 163 goto unlock;
f078f209 164
e8324357
S
165 ath_tx_queue_tid(txq, tid);
166 ath_txq_schedule(sc, txq);
167unlock:
23de5dc9 168 ath_txq_unlock_complete(sc, txq);
528f0c6b 169}
f078f209 170
/*
 * Return the per-frame driver state stashed in the mac80211
 * rate_driver_data scratch area of the skb's tx info.  The BUILD_BUG_ON
 * guards against struct ath_frame_info outgrowing that scratch space.
 */
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
		     sizeof(tx_info->rate_driver_data));
	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
178
156369fa
FF
/* Send a BlockAckReq to move the receiver's BA window start to @seqno. */
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
	ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
			   seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
184
/*
 * Drain all software-queued frames of a TID.  Frames that were already
 * retried are failed (their BAW slot is released first); never-sent
 * frames are transmitted as normal, non-aggregated frames.  If any BAW
 * slot was released, a BAR is sent — with the txq lock dropped, since
 * ath_send_bar() must not be called under it.
 * Caller holds the txq lock.
 */
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = tid->ac->txq;
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;
	bool sendbar = false;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		if (bf && fi->retries) {
			/* already retried: release its BAW slot and fail it */
			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			sendbar = true;
		} else {
			/* not yet in the BAW: send as a normal frame */
			ath_tx_send_normal(sc, txq, NULL, skb);
		}
	}

	/* window fully drained: aggregation session state can be reset */
	if (tid->baw_head == tid->baw_tail) {
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;
	}

	if (sendbar) {
		/* BAR must be sent without holding the txq lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, tid->seq_start);
		ath_txq_lock(sc, txq);
	}
}
f078f209 224
e8324357
S
/*
 * Mark @seqno as completed in the TID's block-ack window bitmap, then
 * slide the window start (seq_start/baw_head) past any leading run of
 * completed frames, keeping the pending BAR index in step.
 */
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		if (tid->bar_index >= 0)
			tid->bar_index--;
	}
}
f078f209 242
/*
 * Record @seqno as in-flight in the TID's block-ack window bitmap,
 * extending the window tail when the new slot lies beyond it.
 */
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     u16 seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	/* grow the tail if this slot is past the current window end */
	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
258
259/*
e8324357
S
260 * TODO: For frame(s) that are in the retry state, we will reuse the
261 * sequence number(s) without setting the retry bit. The
262 * alternative is to give up on these and BAR the receiver's window
263 * forward.
f078f209 264 */
e8324357
S
/*
 * Fail every software-queued frame of a TID with ATH_TX_ERROR and reset
 * the TID's block-ack window bookkeeping to an empty state.  Frames with
 * software retries have their BAW slot released before completion.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)

{
	struct sk_buff *skb;
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;
	struct ath_frame_info *fi;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	while ((skb = __skb_dequeue(&tid->buf_q))) {
		fi = get_frame_info(skb);
		bf = fi->bf;

		/* no descriptor was ever attached: complete the skb directly */
		if (!bf) {
			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
			continue;
		}

		list_add_tail(&bf->list, &bf_head);

		/* retried frames occupy a BAW slot; release it */
		if (fi->retries)
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}

	/* window is now empty */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
	tid->bar_index = -1;
}
299
/*
 * Account @count software retries on a frame.  On the first retry the
 * IEEE 802.11 retry flag is set in the header, and the header bytes are
 * re-synced to the device since the DMA mapping already covers them.
 */
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct sk_buff *skb, int count)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ath_buf *bf = fi->bf;
	struct ieee80211_hdr *hdr;
	int prev = fi->retries;

	TX_STAT_INC(txq->axq_qnum, a_retries);
	fi->retries += count;

	/* retry bit already set on a previous retry: nothing else to do */
	if (prev > 0)
		return;

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   sizeof(*hdr), DMA_TO_DEVICE);
}
319
0a8cea84 320static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
d43f3015 321{
0a8cea84 322 struct ath_buf *bf = NULL;
d43f3015
S
323
324 spin_lock_bh(&sc->tx.txbuflock);
0a8cea84
FF
325
326 if (unlikely(list_empty(&sc->tx.txbuf))) {
8a46097a
VT
327 spin_unlock_bh(&sc->tx.txbuflock);
328 return NULL;
329 }
0a8cea84
FF
330
331 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
332 list_del(&bf->list);
333
d43f3015
S
334 spin_unlock_bh(&sc->tx.txbuflock);
335
0a8cea84
FF
336 return bf;
337}
338
/* Return a tx buffer to the free pool. */
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}
345
/*
 * Duplicate a tx buffer for retransmission of a stale ("holding")
 * descriptor: copy the mpdu pointer, DMA address, descriptor contents and
 * state into a freshly allocated buffer.  Returns NULL (with a WARN) if
 * the free pool is empty.
 */
static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}
363
b572d033
FF
364static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
365 struct ath_tx_status *ts, int txok,
366 int *nframes, int *nbad)
367{
2d42efc4 368 struct ath_frame_info *fi;
b572d033
FF
369 u16 seq_st = 0;
370 u32 ba[WME_BA_BMP_SIZE >> 5];
371 int ba_index;
372 int isaggr = 0;
373
374 *nbad = 0;
375 *nframes = 0;
376
b572d033
FF
377 isaggr = bf_isaggr(bf);
378 if (isaggr) {
379 seq_st = ts->ts_seqnum;
380 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
381 }
382
383 while (bf) {
2d42efc4 384 fi = get_frame_info(bf->bf_mpdu);
6a0ddaef 385 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
b572d033
FF
386
387 (*nframes)++;
388 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
389 (*nbad)++;
390
391 bf = bf->bf_next;
392 }
393}
394
395
d43f3015
S
/*
 * Post-transmit processing for an A-MPDU (or a subframe sent under an
 * aggregation session).  Walks the subframe chain: completes subframes
 * acked in the block-ack bitmap, queues un-acked ones for software retry
 * (up to ATH_MAX_SW_RETRIES), updates the BAW, and sends a BAR when the
 * receiver's window must be moved past permanently failed frames.
 *
 * Called with the txq lock held; the lock is dropped temporarily around
 * ath_send_bar(), which must run unlocked.
 */
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok, bool retry)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head;
	struct sk_buff_head bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	struct ath_frame_info *fi;
	int nframes;
	u8 tidno;
	bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
	int i, retries;
	int bar_index = -1;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);

	/* snapshot the rate table before per-frame status handling mutates it */
	memcpy(rates, tx_info->control.rates, sizeof(rates));

	/* total hw retries across all rate series actually tried */
	retries = ts->ts_longretry + 1;
	for (i = 0; i < ts->ts_rateindex; i++)
		retries += rates[i].count;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		/* station is gone: fail the whole chain and bail out */
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			if (!bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
	tid = ATH_AN_2_TID(an, tidno);
	seq_first = tid->seq_start;

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted
	 */
	if (tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when BA
			 * issue happens. Chip needs to be reset.
			 * But AP code may have sychronization issues
			 * when perform internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	__skb_queue_head_init(&bf_pending);

	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
	while (bf) {
		u16 seqno = bf->bf_state.seqno;

		txfail = txpending = sendbar = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		fi = get_frame_info(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else if ((tid->state & AGGR_CLEANUP) || !retry) {
			/*
			 * cleanup in progress, just fail
			 * the un-acked sub-frames
			 */
			txfail = 1;
		} else if (flush) {
			/* queue being flushed: requeue, do not fail */
			txpending = 1;
		} else if (fi->retries < ATH_MAX_SW_RETRIES) {
			if (txok || !an->sleeping)
				ath_tx_set_retry(sc, txq, bf->bf_mpdu,
						 retries);

			txpending = 1;
		} else {
			/* retry budget exhausted: fail and remember the
			 * highest failed index for the BAR */
			txfail = 1;
			txfail_cnt++;
			bar_index = max_t(int, bar_index,
				ATH_BA_INDEX(seq_first, seqno));
		}

		/*
		 * Make sure the last desc is reclaimed if it
		 * not a holding desc.
		 */
		INIT_LIST_HEAD(&bf_head);
		if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
		    bf_next != NULL || !bf_last->bf_stale)
			list_move_tail(&bf->list, &bf_head);

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			ath_tx_update_baw(sc, tid, seqno);

			/* feed rate control exactly once per aggregate */
			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok);
				rc_update = false;
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
			    bf->bf_next == NULL && bf_last->bf_stale) {
				struct ath_buf *tbf;

				tbf = ath_clone_txbuf(sc, bf_last);
				/*
				 * Update tx baw and complete the
				 * frame with failed status if we
				 * run out of tx buf.
				 */
				if (!tbf) {
					ath_tx_update_baw(sc, tid, seqno);

					ath_tx_complete_buf(sc, bf, txq,
							    &bf_head, ts, 0);
					bar_index = max_t(int, bar_index,
						ATH_BA_INDEX(seq_first, seqno));
					break;
				}

				fi->bf = tbf;
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			__skb_queue_tail(&bf_pending, skb);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!skb_queue_empty(&bf_pending)) {
		if (an->sleeping)
			ieee80211_sta_set_buffered(sta, tid->tidno, true);

		skb_queue_splice(&bf_pending, &tid->buf_q);
		if (!an->sleeping) {
			ath_tx_queue_tid(txq, tid);

			if (ts->ts_status & ATH9K_TXERR_FILT)
				tid->ac->clear_ps_filter = true;
		}
	}

	if (bar_index >= 0) {
		u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);

		if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
			tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);

		/* BAR must be sent without holding the txq lock */
		ath_txq_unlock(sc, txq);
		ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
		ath_txq_lock(sc, txq);
	}

	if (tid->state & AGGR_CLEANUP)
		ath_tx_flush_tid(sc, tid);

	rcu_read_unlock();

	if (needreset) {
		RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
	}
}
f078f209 621
1a6e9d0f
RM
622static bool ath_lookup_legacy(struct ath_buf *bf)
623{
624 struct sk_buff *skb;
625 struct ieee80211_tx_info *tx_info;
626 struct ieee80211_tx_rate *rates;
627 int i;
628
629 skb = bf->bf_mpdu;
630 tx_info = IEEE80211_SKB_CB(skb);
631 rates = tx_info->control.rates;
632
059ee09b
FF
633 for (i = 0; i < 4; i++) {
634 if (!rates[i].count || rates[i].idx < 0)
635 break;
636
1a6e9d0f
RM
637 if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
638 return true;
639 }
640
641 return false;
642}
643
e8324357
S
/*
 * Compute the maximum aggregate length (in bytes) for a frame's rate
 * series: the smallest 4ms-duration frame length across the series,
 * further capped by BTCOEX limits and the peer's advertised max A-MPDU.
 * Returns 0 to disable aggregation (probe rate or legacy rate present).
 */
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, bt_aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		int modeidx;

		if (!rates[i].count)
			continue;

		if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
			legacy = 1;
			break;
		}

		/* pick the ath_max_4ms_framelen row for this bandwidth/GI */
		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			modeidx = MCS_HT40;
		else
			modeidx = MCS_HT20;

		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			modeidx++;

		frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
		max_4ms_framelen = min(max_4ms_framelen, frmlen);
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * Override the default aggregation limit for BTCOEX.
	 */
	bt_aggr_limit = ath9k_btcoex_aggr_limit(sc, max_4ms_framelen);
	if (bt_aggr_limit)
		aggr_limit = bt_aggr_limit;

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
f078f209 715
e8324357 716/*
d43f3015 717 * Returns the number of delimiters to be added to
e8324357 718 * meet the minimum required mpdudensity.
e8324357
S
719 */
/*
 * Compute how many delimiters must precede a subframe so the receiver's
 * minimum MPDU density is honoured: a base count from the frame length,
 * extra padding for encrypted subframes on non-EDMA hardware, a larger
 * floor on the first subframe for certain AR9003 parts, and finally the
 * density-derived minimum based on the first (highest) rate in the series.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen,
				  bool first_subfrm)
{
#define FIRST_DESC_NDELIMS 60
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;
	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption enabled, hardware requires some more padding between
	 * subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates
	 */
	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Add delimiter when using RTS/CTS with aggregation
	 * and non enterprise AR9003 card
	 */
	if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) &&
	    (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE))
		ndelim = max(ndelim, FIRST_DESC_NDELIMS);

	/*
	 * Convert desired mpdu density from microeconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
791
/*
 * Pull frames from the TID's software queue and chain them into one
 * aggregate on @bf_q, stopping at the BAW edge, the rate-derived byte
 * limit, the subframe cap, or a probe-rate frame.  Each included frame is
 * added to the BAW (unless it is a retry, already in it) and annotated
 * with its delimiter count.  *aggr_len receives the total aggregate
 * length in bytes.  Caller holds the txq lock and guarantees a non-empty
 * tid->buf_q on entry.
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	struct ieee80211_tx_info *tx_info;
	struct ath_frame_info *fi;
	struct sk_buff *skb;
	u16 seqno;

	do {
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
		/* lazily attach a descriptor on first use */
		if (!fi->bf)
			bf = ath_tx_setup_buffer(sc, txq, tid, skb);

		if (!bf)
			continue;

		bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
		seqno = bf->bf_state.seqno;

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		/* frame is below a pending BAR: complete it as failed */
		if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
			struct ath_tx_status ts = {};
			struct list_head bf_head;

			INIT_LIST_HEAD(&bf_head);
			list_add(&bf->list, &bf_head);
			__skb_unlink(skb, &tid->buf_q);
			ath_tx_update_baw(sc, tid, seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
			continue;
		}

		if (!bf_first)
			bf_first = bf;

		/* rate lookup once per aggregate */
		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

		if (nframes &&
		    ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
		     ath_lookup_legacy(bf))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
		if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
			break;

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen,
						!nframes);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		nframes++;
		bf->bf_next = NULL;

		/* link buffers of this frame to the aggregate */
		if (!fi->retries)
			ath_tx_addto_baw(sc, tid, seqno);
		bf->bf_state.ndelim = ndelim;

		__skb_unlink(skb, &tid->buf_q);
		list_add_tail(&bf->list, bf_q);
		if (bf_prev)
			bf_prev->bf_next = bf;

		bf_prev = bf;

	} while (!skb_queue_empty(&tid->buf_q));

	*aggr_len = al;

	return status;
#undef PADBYTES
}
f078f209 901
38dad7ba
FF
902/*
903 * rix - rate index
904 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
905 * width - 0 for 20 MHz, 1 for 40 MHz
906 * half_gi - to use 4us v/s 3.6 us for symbol time
907 */
908static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
909 int width, int half_gi, bool shortPreamble)
910{
911 u32 nbits, nsymbits, duration, nsymbols;
912 int streams;
913
914 /* find number of symbols: PLCP + data */
915 streams = HT_RC_2_STREAMS(rix);
916 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
917 nsymbits = bits_per_symbol[rix % 8][width] * streams;
918 nsymbols = (nbits + nsymbits - 1) / nsymbits;
919
920 if (!half_gi)
921 duration = SYMBOL_TIME(nsymbols);
922 else
923 duration = SYMBOL_TIME_HALFGI(nsymbols);
924
925 /* addup duration for legacy/ht training and signal fields */
926 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
927
928 return duration;
929}
930
493cf04f
FF
931static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
932 struct ath_tx_info *info, int len)
38dad7ba
FF
933{
934 struct ath_hw *ah = sc->sc_ah;
38dad7ba
FF
935 struct sk_buff *skb;
936 struct ieee80211_tx_info *tx_info;
937 struct ieee80211_tx_rate *rates;
938 const struct ieee80211_rate *rate;
939 struct ieee80211_hdr *hdr;
493cf04f
FF
940 int i;
941 u8 rix = 0;
38dad7ba
FF
942
943 skb = bf->bf_mpdu;
944 tx_info = IEEE80211_SKB_CB(skb);
945 rates = tx_info->control.rates;
946 hdr = (struct ieee80211_hdr *)skb->data;
493cf04f
FF
947
948 /* set dur_update_en for l-sig computation except for PS-Poll frames */
949 info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
38dad7ba
FF
950
951 /*
952 * We check if Short Preamble is needed for the CTS rate by
953 * checking the BSS's global flag.
954 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
955 */
956 rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
493cf04f 957 info->rtscts_rate = rate->hw_value;
d47a61aa
SM
958
959 if (tx_info->control.vif &&
960 tx_info->control.vif->bss_conf.use_short_preamble)
493cf04f 961 info->rtscts_rate |= rate->hw_value_short;
38dad7ba
FF
962
963 for (i = 0; i < 4; i++) {
964 bool is_40, is_sgi, is_sp;
965 int phy;
966
967 if (!rates[i].count || (rates[i].idx < 0))
968 continue;
969
970 rix = rates[i].idx;
493cf04f 971 info->rates[i].Tries = rates[i].count;
38dad7ba
FF
972
973 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
493cf04f
FF
974 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
975 info->flags |= ATH9K_TXDESC_RTSENA;
38dad7ba 976 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
493cf04f
FF
977 info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
978 info->flags |= ATH9K_TXDESC_CTSENA;
38dad7ba
FF
979 }
980
981 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
493cf04f 982 info->rates[i].RateFlags |= ATH9K_RATESERIES_2040;
38dad7ba 983 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
493cf04f 984 info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
38dad7ba
FF
985
986 is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
987 is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
988 is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
989
990 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
991 /* MCS rates */
493cf04f
FF
992 info->rates[i].Rate = rix | 0x80;
993 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
994 ah->txchainmask, info->rates[i].Rate);
995 info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
38dad7ba
FF
996 is_40, is_sgi, is_sp);
997 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
493cf04f 998 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
38dad7ba
FF
999 continue;
1000 }
1001
1002 /* legacy rates */
1003 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1004 !(rate->flags & IEEE80211_RATE_ERP_G))
1005 phy = WLAN_RC_PHY_CCK;
1006 else
1007 phy = WLAN_RC_PHY_OFDM;
1008
1009 rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
493cf04f 1010 info->rates[i].Rate = rate->hw_value;
38dad7ba
FF
1011 if (rate->hw_value_short) {
1012 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
493cf04f 1013 info->rates[i].Rate |= rate->hw_value_short;
38dad7ba
FF
1014 } else {
1015 is_sp = false;
1016 }
1017
1018 if (bf->bf_state.bfs_paprd)
493cf04f 1019 info->rates[i].ChSel = ah->txchainmask;
38dad7ba 1020 else
493cf04f
FF
1021 info->rates[i].ChSel = ath_txchainmask_reduction(sc,
1022 ah->txchainmask, info->rates[i].Rate);
38dad7ba 1023
493cf04f 1024 info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
38dad7ba
FF
1025 phy, rate->bitrate * 100, len, rix, is_sp);
1026 }
1027
1028 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1029 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
493cf04f 1030 info->flags &= ~ATH9K_TXDESC_RTSENA;
38dad7ba
FF
1031
1032 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
493cf04f
FF
1033 if (info->flags & ATH9K_TXDESC_RTSENA)
1034 info->flags &= ~ATH9K_TXDESC_CTSENA;
1035}
38dad7ba 1036
493cf04f
FF
1037static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1038{
1039 struct ieee80211_hdr *hdr;
1040 enum ath9k_pkt_type htype;
1041 __le16 fc;
1042
1043 hdr = (struct ieee80211_hdr *)skb->data;
1044 fc = hdr->frame_control;
38dad7ba 1045
493cf04f
FF
1046 if (ieee80211_is_beacon(fc))
1047 htype = ATH9K_PKT_TYPE_BEACON;
1048 else if (ieee80211_is_probe_resp(fc))
1049 htype = ATH9K_PKT_TYPE_PROBE_RESP;
1050 else if (ieee80211_is_atim(fc))
1051 htype = ATH9K_PKT_TYPE_ATIM;
1052 else if (ieee80211_is_pspoll(fc))
1053 htype = ATH9K_PKT_TYPE_PSPOLL;
1054 else
1055 htype = ATH9K_PKT_TYPE_NORMAL;
1056
1057 return htype;
38dad7ba
FF
1058}
1059
/*
 * Program the hardware descriptors for a chain of buffers: either a
 * single frame or the linked subframes of an aggregate.
 */
static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_txq *txq, int len)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
	struct ath_buf *bf_first = bf;
	struct ath_tx_info info;
	bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);

	memset(&info, 0, sizeof(info));
	info.is_first = true;
	info.is_last = true;
	info.txpower = MAX_RATE_POWER;
	info.qcu = txq->axq_qnum;

	/* Interrupt on completion; honour NOACK/LDPC requests from mac80211. */
	info.flags = ATH9K_TXDESC_INTREQ;
	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		info.flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
		info.flags |= ATH9K_TXDESC_LDPC;

	/* Rate series apply to the whole chain; filled in once up front. */
	ath_buf_set_rate(sc, bf, &info, len);

	if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
		info.flags |= ATH9K_TXDESC_CLRDMASK;

	if (bf->bf_state.bfs_paprd)
		info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;


	while (bf) {
		struct sk_buff *skb = bf->bf_mpdu;
		struct ath_frame_info *fi = get_frame_info(skb);

		info.type = get_hw_packet_type(skb);
		/* Chain each descriptor to the next subframe's DMA address. */
		if (bf->bf_next)
			info.link = bf->bf_next->bf_daddr;
		else
			info.link = 0;

		info.buf_addr[0] = bf->bf_buf_addr;
		info.buf_len[0] = skb->len;
		info.pkt_len = fi->framelen;
		info.keyix = fi->keyix;
		info.keytype = fi->keytype;

		if (aggr) {
			/* Mark the subframe's position within the A-MPDU. */
			if (bf == bf_first)
				info.aggr = AGGR_BUF_FIRST;
			else if (!bf->bf_next)
				info.aggr = AGGR_BUF_LAST;
			else
				info.aggr = AGGR_BUF_MIDDLE;

			info.ndelim = bf->bf_state.ndelim;
			info.aggr_len = len;
		}

		ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
		bf = bf->bf_next;
	}
}
1122
/*
 * Form aggregates from the TID's software queue and hand them to the
 * hardware queue until it is filled deep enough or the BAW closes.
 */
static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct ieee80211_tx_info *tx_info;
	struct list_head bf_q;
	int aggr_len;

	do {
		if (skb_queue_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);

		/* Request a PS-filter clear once after the station woke up. */
		if (tid->ac->clear_ps_filter) {
			tid->ac->clear_ps_filter = false;
			tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		} else {
			tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
		}

		/* if only one frame, send as non-aggregate */
		if (bf == bf->bf_lastbf) {
			aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
			bf->bf_state.bf_type = BUF_AMPDU;
		} else {
			TX_STAT_INC(txq->axq_qnum, a_aggr);
		}

		ath_tx_fill_desc(sc, bf, txq, aggr_len);
		ath_tx_txqaddbuf(sc, txq, &bf_q, false);
	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
1171
231c3a1f
FF
1172int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1173 u16 tid, u16 *ssn)
e8324357
S
1174{
1175 struct ath_atx_tid *txtid;
1176 struct ath_node *an;
1177
1178 an = (struct ath_node *)sta->drv_priv;
f83da965 1179 txtid = ATH_AN_2_TID(an, tid);
231c3a1f
FF
1180
1181 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
1182 return -EAGAIN;
1183
f83da965 1184 txtid->state |= AGGR_ADDBA_PROGRESS;
75401849 1185 txtid->paused = true;
49447f2f 1186 *ssn = txtid->seq_start = txtid->seq_next;
f9437543 1187 txtid->bar_index = -1;
231c3a1f 1188
2ed72229
FF
1189 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1190 txtid->baw_head = txtid->baw_tail = 0;
1191
231c3a1f 1192 return 0;
e8324357 1193}
f078f209 1194
/* Tear down the TX aggregation session for the given station/TID. */
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = txtid->ac->txq;

	/* Teardown already in progress. */
	if (txtid->state & AGGR_CLEANUP)
		return;

	/* Session never completed; just drop the in-progress flag. */
	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_txq_lock(sc, txq);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;

	ath_tx_flush_tid(sc, txtid);
	ath_txq_unlock_complete(sc, txq);
}
f078f209 1226
/*
 * Station entered power-save: unschedule all of its TIDs and report to
 * mac80211 which TIDs still have frames buffered in software.
 */
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
		       struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	struct ath_txq *txq;
	bool buffered;
	int tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {

		if (!tid->sched)
			continue;

		ac = tid->ac;
		txq = ac->txq;

		ath_txq_lock(sc, txq);

		buffered = !skb_queue_empty(&tid->buf_q);

		/* Unschedule the TID, and its AC if it was scheduled too. */
		tid->sched = false;
		list_del(&tid->list);

		if (ac->sched) {
			ac->sched = false;
			list_del(&ac->list);
		}

		ath_txq_unlock(sc, txq);

		/* Reported after dropping the txq lock. */
		ieee80211_sta_set_buffered(sta, tidno, buffered);
	}
}
1262
1263void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1264{
1265 struct ath_atx_tid *tid;
1266 struct ath_atx_ac *ac;
1267 struct ath_txq *txq;
1268 int tidno;
1269
1270 for (tidno = 0, tid = &an->tid[tidno];
1271 tidno < WME_NUM_TID; tidno++, tid++) {
1272
1273 ac = tid->ac;
1274 txq = ac->txq;
1275
23de5dc9 1276 ath_txq_lock(sc, txq);
5519541d
FF
1277 ac->clear_ps_filter = true;
1278
56dc6336 1279 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
5519541d
FF
1280 ath_tx_queue_tid(txq, tid);
1281 ath_txq_schedule(sc, txq);
1282 }
1283
23de5dc9 1284 ath_txq_unlock_complete(sc, txq);
5519541d
FF
1285 }
1286}
1287
e8324357
S
1288void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1289{
1290 struct ath_atx_tid *txtid;
1291 struct ath_node *an;
1292
1293 an = (struct ath_node *)sta->drv_priv;
1294
3d4e20f2
SM
1295 txtid = ATH_AN_2_TID(an, tid);
1296 txtid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1297 txtid->state |= AGGR_ADDBA_COMPLETE;
1298 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1299 ath_tx_resume_tid(sc, txtid);
f078f209
LR
1300}
1301
e8324357
S
1302/********************/
1303/* Queue Management */
1304/********************/
f078f209 1305
e8324357
S
1306static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1307 struct ath_txq *txq)
f078f209 1308{
e8324357
S
1309 struct ath_atx_ac *ac, *ac_tmp;
1310 struct ath_atx_tid *tid, *tid_tmp;
f078f209 1311
e8324357
S
1312 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1313 list_del(&ac->list);
1314 ac->sched = false;
1315 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1316 list_del(&tid->list);
1317 tid->sched = false;
1318 ath_tid_drain(sc, txq, tid);
1319 }
f078f209
LR
1320 }
1321}
1322
/*
 * Allocate a hardware TX queue of the requested type/subtype and set up
 * the matching software queue state. Returns NULL when the hardware has
 * no queue of this type to spare.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		__skb_queue_head_init(&txq->complete_q);
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<axq_qnum;

		/* EDMA TX FIFO bookkeeping. */
		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}
1395
e8324357
S
1396int ath_txq_update(struct ath_softc *sc, int qnum,
1397 struct ath9k_tx_queue_info *qinfo)
1398{
cbe61d8a 1399 struct ath_hw *ah = sc->sc_ah;
e8324357
S
1400 int error = 0;
1401 struct ath9k_tx_queue_info qi;
1402
1403 if (qnum == sc->beacon.beaconq) {
1404 /*
1405 * XXX: for beacon queue, we just save the parameter.
1406 * It will be picked up by ath_beaconq_config when
1407 * it's necessary.
1408 */
1409 sc->beacon.beacon_qi = *qinfo;
f078f209 1410 return 0;
e8324357 1411 }
f078f209 1412
9680e8a3 1413 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
e8324357
S
1414
1415 ath9k_hw_get_txq_props(ah, qnum, &qi);
1416 qi.tqi_aifs = qinfo->tqi_aifs;
1417 qi.tqi_cwmin = qinfo->tqi_cwmin;
1418 qi.tqi_cwmax = qinfo->tqi_cwmax;
1419 qi.tqi_burstTime = qinfo->tqi_burstTime;
1420 qi.tqi_readyTime = qinfo->tqi_readyTime;
1421
1422 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
3800276a
JP
1423 ath_err(ath9k_hw_common(sc->sc_ah),
1424 "Unable to update hardware queue %u!\n", qnum);
e8324357
S
1425 error = -EIO;
1426 } else {
1427 ath9k_hw_resettxqueue(ah, qnum);
1428 }
1429
1430 return error;
1431}
1432
1433int ath_cabq_update(struct ath_softc *sc)
1434{
1435 struct ath9k_tx_queue_info qi;
9814f6b3 1436 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
e8324357 1437 int qnum = sc->beacon.cabq->axq_qnum;
f078f209 1438
e8324357 1439 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209 1440 /*
e8324357 1441 * Ensure the readytime % is within the bounds.
f078f209 1442 */
17d7904d
S
1443 if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1444 sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1445 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1446 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
f078f209 1447
9814f6b3 1448 qi.tqi_readyTime = (cur_conf->beacon_interval *
fdbf7335 1449 sc->config.cabqReadytime) / 100;
e8324357
S
1450 ath_txq_update(sc, qnum, &qi);
1451
1452 return 0;
f078f209
LR
1453}
1454
4b3ba66a
FF
1455static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1456{
1457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1458 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1459}
1460
/*
 * Complete, with a flush status, every buffer chain on the given
 * descriptor list; stale buffers are simply returned to the free pool.
 */
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *list, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	ts.ts_status = ATH9K_TX_FLUSH;
	INIT_LIST_HEAD(&bf_head);

	while (!list_empty(list)) {
		bf = list_first_entry(list, struct ath_buf, list);

		/* Stale buffers carry no frame to complete; just recycle. */
		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		/* Detach the whole (sub)frame chain in one cut. */
		lastbf = bf->bf_lastbf;
		list_cut_position(&bf_head, list, &lastbf->list);

		txq->axq_depth--;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth--;

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
					     retry_tx);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
	}
}
f078f209 1496
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	ath_txq_lock(sc, txq);

	/* On EDMA hardware, empty each TX FIFO slot in turn. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		int idx = txq->txq_tailidx;

		while (!list_empty(&txq->txq_fifo[idx])) {
			ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
					   retry_tx);

			INCR(idx, ATH_TXFIFO_DEPTH);
		}
		txq->txq_tailidx = idx;
	}

	txq->axq_link = NULL;
	txq->axq_tx_inprogress = false;
	ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);

	/* flush any pending frames if aggregation is enabled */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !retry_tx)
		ath_txq_drain_pending_buffers(sc, txq);

	ath_txq_unlock_complete(sc, txq);
}
1529
/*
 * Abort TX DMA and drain every configured hardware queue. Returns true
 * when DMA stopped cleanly, false if any queue still had frames pending.
 */
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i;
	u32 npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return true;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
			npend |= BIT(i);
	}

	if (npend)
		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = false;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
f078f209 1571
043a0405 1572void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
e8324357 1573{
043a0405
S
1574 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1575 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
e8324357 1576}
f078f209 1577
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	/* Nothing to do during reset, with no scheduled ACs, or a deep hw queue. */
	if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = false;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = false;

			if (tid->paused)
				continue;

			ath_tx_sched_aggr(sc, txq, tid);

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!skb_queue_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		/* Re-queue the AC if it still has TIDs with pending frames. */
		if (!list_empty(&ac->tid_q) && !ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}

		/* Stop after one full round or once the hw queue is deep enough. */
		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
f078f209 1631
e8324357
S
1632/***********/
1633/* TX, DMA */
1634/***********/
1635
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head, bool internal)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *bf_last;
	bool puttxbuf = false;
	bool edma;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	bf = list_first_entry(head, struct ath_buf, list);
	bf_last = list_entry(head->prev, struct ath_buf, list);

	ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
		txq->axq_qnum, txq->axq_depth);

	if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
		/* EDMA with a free FIFO slot: hand the chain to the FIFO. */
		list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		puttxbuf = true;
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link) {
			/* Chain onto the last descriptor already queued. */
			ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
			ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
				txq->axq_qnum, txq->axq_link,
				ito64(bf->bf_daddr), bf->bf_desc);
		} else if (!edma)
			puttxbuf = true;

		txq->axq_link = bf_last->bf_desc;
	}

	if (puttxbuf) {
		TX_STAT_INC(txq->axq_qnum, puttxbuf);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	}

	if (!edma) {
		TX_STAT_INC(txq->axq_qnum, txstart);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}

	/* Internal requeues must not inflate the depth counters. */
	if (!internal) {
		txq->axq_depth++;
		if (bf_is_ampdu_not_probing(bf))
			txq->axq_ampdu_depth++;
	}
}
f078f209 1700
/*
 * Queue an MPDU on an aggregation-enabled TID: either buffer it in the
 * software queue for later aggregation, or send it immediately.
 */
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct sk_buff *skb, struct ath_tx_control *txctl)
{
	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* ath_tx_setup_buffer() frees the skb on failure. */
	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
	if (!bf)
		return;

	bf->bf_state.bf_type = BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
	bf->bf_lastbf = bf;
	ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
1746
82b873af 1747static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
44f1d26c 1748 struct ath_atx_tid *tid, struct sk_buff *skb)
e8324357 1749{
44f1d26c
FF
1750 struct ath_frame_info *fi = get_frame_info(skb);
1751 struct list_head bf_head;
e8324357
S
1752 struct ath_buf *bf;
1753
44f1d26c
FF
1754 bf = fi->bf;
1755 if (!bf)
1756 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1757
1758 if (!bf)
1759 return;
1760
1761 INIT_LIST_HEAD(&bf_head);
1762 list_add_tail(&bf->list, &bf_head);
399c6489 1763 bf->bf_state.bf_type = 0;
e8324357 1764
d43f3015 1765 bf->bf_lastbf = bf;
493cf04f 1766 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
44f1d26c 1767 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
fec247c0 1768 TX_STAT_INC(txq->axq_qnum, queued);
e8324357
S
1769}
1770
2d42efc4
FF
1771static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1772 int framelen)
e8324357
S
1773{
1774 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2d42efc4
FF
1775 struct ieee80211_sta *sta = tx_info->control.sta;
1776 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
6a0ddaef 1777 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2d42efc4 1778 struct ath_frame_info *fi = get_frame_info(skb);
93ae2dd2 1779 struct ath_node *an = NULL;
2d42efc4 1780 enum ath9k_key_type keytype;
e8324357 1781
2d42efc4 1782 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
e8324357 1783
93ae2dd2
FF
1784 if (sta)
1785 an = (struct ath_node *) sta->drv_priv;
1786
2d42efc4
FF
1787 memset(fi, 0, sizeof(*fi));
1788 if (hw_key)
1789 fi->keyix = hw_key->hw_key_idx;
93ae2dd2
FF
1790 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1791 fi->keyix = an->ps_key;
2d42efc4
FF
1792 else
1793 fi->keyix = ATH9K_TXKEYIX_INVALID;
1794 fi->keytype = keytype;
1795 fi->framelen = framelen;
e8324357
S
1796}
1797
ea066d5a
MSS
1798u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1799{
1800 struct ath_hw *ah = sc->sc_ah;
1801 struct ath9k_channel *curchan = ah->curchan;
d77bf3eb
RM
1802 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
1803 (curchan->channelFlags & CHANNEL_5GHZ) &&
1804 (chainmask == 0x7) && (rate < 0x90))
ea066d5a
MSS
1805 return 0x3;
1806 else
1807 return chainmask;
1808}
1809
/*
 * Assign a descriptor (and sequence number if necessary,
 * and map buffer for DMA. Frees skb on error
 */
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
					   struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_frame_info *fi = get_frame_info(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_buf *bf;
	int fragno;
	u16 seqno;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, XMIT, "TX buffers are full\n");
		goto error;
	}

	ATH_TXBUF_RESET(bf);

	if (tid) {
		/* Assign the TID's next sequence number, preserving any
		 * fragment number already present in the header. */
		fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
		seqno = tid->seq_next;
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

		if (fragno)
			hdr->seq_ctrl |= cpu_to_le16(fragno);

		/* All fragments of one MSDU share a sequence number. */
		if (!ieee80211_has_morefrags(hdr->frame_control))
			INCR(tid->seq_next, IEEE80211_SEQ_MAX);

		bf->bf_state.seqno = seqno;
	}

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		/* Unwind: detach the skb and recycle the buffer. */
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
		goto error;
	}

	fi->bf = bf;

	return bf;

error:
	dev_kfree_skb_any(skb);
	return NULL;
}
1869
/* FIXME: tx power */
/*
 * Dispatch a frame: route QoS data on HT hardware through the A-MPDU
 * path, everything else through the normal single-frame path.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
	u8 tidno;

	/* Resolve the TID for QoS data frames on HT-capable hardware. */
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && txctl->an &&
		ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);

		WARN_ON(tid->ac->txq != txctl->txq);
	}

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
		/* ath_tx_setup_buffer() frees the skb on failure. */
		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
		if (!bf)
			return;

		bf->bf_state.bfs_paprd = txctl->paprd;

		if (txctl->paprd)
			bf->bf_state.bfs_paprd_timestamp = jiffies;

		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}
}
1908
f8316df1 1909/* Upon failure caller should free skb */
c52f33d0 1910int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1911 struct ath_tx_control *txctl)
f078f209 1912{
28d16708
FF
1913 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1914 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1915 struct ieee80211_sta *sta = info->control.sta;
f59a59fe 1916 struct ieee80211_vif *vif = info->control.vif;
9ac58615 1917 struct ath_softc *sc = hw->priv;
84642d6b 1918 struct ath_txq *txq = txctl->txq;
4d91f9f3 1919 int padpos, padsize;
04caf863 1920 int frmlen = skb->len + FCS_LEN;
28d16708 1921 int q;
f078f209 1922
a9927ba3
BG
1923 /* NOTE: sta can be NULL according to net/mac80211.h */
1924 if (sta)
1925 txctl->an = (struct ath_node *)sta->drv_priv;
1926
04caf863
FF
1927 if (info->control.hw_key)
1928 frmlen += info->control.hw_key->icv_len;
1929
f078f209 1930 /*
e8324357
S
1931 * As a temporary workaround, assign seq# here; this will likely need
1932 * to be cleaned up to work better with Beacon transmission and virtual
1933 * BSSes.
f078f209 1934 */
e8324357 1935 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1936 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1937 sc->tx.seq_no += 0x10;
1938 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1939 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1940 }
f078f209 1941
42cecc34
JL
1942 /* Add the padding after the header if this is not already done */
1943 padpos = ath9k_cmn_padpos(hdr->frame_control);
1944 padsize = padpos & 3;
1945 if (padsize && skb->len > padpos) {
1946 if (skb_headroom(skb) < padsize)
1947 return -ENOMEM;
28d16708 1948
42cecc34
JL
1949 skb_push(skb, padsize);
1950 memmove(skb->data, skb->data + padsize, padpos);
6e82bc4a 1951 hdr = (struct ieee80211_hdr *) skb->data;
f078f209 1952 }
f078f209 1953
f59a59fe
FF
1954 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1955 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1956 !ieee80211_is_data(hdr->frame_control))
1957 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1958
2d42efc4
FF
1959 setup_frame_info(hw, skb, frmlen);
1960
1961 /*
1962 * At this point, the vif, hw_key and sta pointers in the tx control
1963 * info are no longer valid (overwritten by the ath_frame_info data.
1964 */
1965
28d16708 1966 q = skb_get_queue_mapping(skb);
23de5dc9
FF
1967
1968 ath_txq_lock(sc, txq);
28d16708
FF
1969 if (txq == sc->tx.txq_map[q] &&
1970 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
7545daf4 1971 ieee80211_stop_queue(sc->hw, q);
3db1cd5c 1972 txq->stopped = true;
f078f209 1973 }
f078f209 1974
44f1d26c 1975 ath_tx_start_dma(sc, skb, txctl);
3ad29529 1976
23de5dc9 1977 ath_txq_unlock(sc, txq);
3ad29529 1978
44f1d26c 1979 return 0;
f078f209
LR
1980}
1981
e8324357
S
1982/*****************/
1983/* TX Completion */
1984/*****************/
528f0c6b 1985
e8324357 1986static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
0f9dc298 1987 int tx_flags, struct ath_txq *txq)
528f0c6b 1988{
e8324357 1989 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1990 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1991 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1992 int q, padpos, padsize;
528f0c6b 1993
d2182b69 1994 ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1995
55797b1a 1996 if (!(tx_flags & ATH_TX_ERROR))
e8324357
S
1997 /* Frame was ACKed */
1998 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b 1999
42cecc34
JL
2000 padpos = ath9k_cmn_padpos(hdr->frame_control);
2001 padsize = padpos & 3;
2002 if (padsize && skb->len>padpos+padsize) {
2003 /*
2004 * Remove MAC header padding before giving the frame back to
2005 * mac80211.
2006 */
2007 memmove(skb->data + padsize, skb->data, padpos);
2008 skb_pull(skb, padsize);
e8324357 2009 }
528f0c6b 2010
c8e8868e 2011 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
1b04b930 2012 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
d2182b69 2013 ath_dbg(common, PS,
226afe68 2014 "Going back to sleep after having received TX status (0x%lx)\n",
1b04b930
S
2015 sc->ps_flags & (PS_WAIT_FOR_BEACON |
2016 PS_WAIT_FOR_CAB |
2017 PS_WAIT_FOR_PSPOLL_DATA |
2018 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
2019 }
2020
7545daf4
FF
2021 q = skb_get_queue_mapping(skb);
2022 if (txq == sc->tx.txq_map[q]) {
7545daf4
FF
2023 if (WARN_ON(--txq->pending_frames < 0))
2024 txq->pending_frames = 0;
92460412 2025
7545daf4
FF
2026 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2027 ieee80211_wake_queue(sc->hw, q);
3db1cd5c 2028 txq->stopped = false;
066dae93 2029 }
97923b14 2030 }
7545daf4 2031
23de5dc9 2032 __skb_queue_tail(&txq->complete_q, skb);
e8324357 2033}
f078f209 2034
e8324357 2035static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b 2036 struct ath_txq *txq, struct list_head *bf_q,
156369fa 2037 struct ath_tx_status *ts, int txok)
f078f209 2038{
e8324357 2039 struct sk_buff *skb = bf->bf_mpdu;
3afd21e7 2040 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
e8324357 2041 unsigned long flags;
6b2c4032 2042 int tx_flags = 0;
f078f209 2043
55797b1a 2044 if (!txok)
6b2c4032 2045 tx_flags |= ATH_TX_ERROR;
f078f209 2046
3afd21e7
FF
2047 if (ts->ts_status & ATH9K_TXERR_FILT)
2048 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2049
c1739eb3 2050 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 2051 bf->bf_buf_addr = 0;
9f42c2b6
FF
2052
2053 if (bf->bf_state.bfs_paprd) {
9cf04dcc
MSS
2054 if (time_after(jiffies,
2055 bf->bf_state.bfs_paprd_timestamp +
2056 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
ca369eb4 2057 dev_kfree_skb_any(skb);
78a18172 2058 else
ca369eb4 2059 complete(&sc->paprd_complete);
9f42c2b6 2060 } else {
55797b1a 2061 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
0f9dc298 2062 ath_tx_complete(sc, skb, tx_flags, txq);
9f42c2b6 2063 }
6cf9e995
BG
2064 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2065 * accidentally reference it later.
2066 */
2067 bf->bf_mpdu = NULL;
e8324357
S
2068
2069 /*
2070 * Return the list of ath_buf of this mpdu to free queue
2071 */
2072 spin_lock_irqsave(&sc->tx.txbuflock, flags);
2073 list_splice_tail_init(bf_q, &sc->tx.txbuf);
2074 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
2075}
2076
/*
 * Translate the hardware tx status into mac80211 rate-control status:
 * ack RSSI, A-MPDU length / acked-subframe counts, retry counts, and
 * invalidation of the rate-table entries beyond the one actually used.
 */
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_status *ts, int nframes, int nbad,
			     int txok)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > nframes);
	}
	/* ampdu_len/ampdu_ack_len are filled in even for single frames
	 * (nframes == 1) so rate control always sees consistent counts. */
	tx_info->status.ampdu_len = nframes;
	tx_info->status.ampdu_ack_len = nframes - nbad;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
		/*
		 * If an underrun error is seen assume it as an excessive
		 * retry only if max frame trigger level has been reached
		 * (2 KB for single stream, and 4 KB for dual stream).
		 * Adjust the long retry as if the frame was tried
		 * hw->max_rate_tries times to affect how rate control updates
		 * PER for the failed rate.
		 * In case of congestion on the bus penalizing this type of
		 * underruns should help hardware actually transmit new frames
		 * successfully by eventually preferring slower rates.
		 * This itself should also alleviate congestion on the bus.
		 */
		if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
					     ATH9K_TX_DELIM_UNDERRUN)) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
			tx_info->status.rates[tx_rateindex].count =
				hw->max_rate_tries;
	}

	/* Invalidate the rate entries the hardware never tried */
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
2131
/*
 * Common completion handling for one (possibly aggregated) frame:
 * update queue-depth accounting, dispatch either through the aggregate
 * completion path or the single-frame rc-status + complete-buf path,
 * then kick the txq scheduler on HT-capable hardware.
 */
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_tx_status *ts, struct ath_buf *bf,
				  struct list_head *bf_head)
{
	int txok;

	txq->axq_depth--;
	txok = !(ts->ts_status & ATH9K_TXERR_MASK);
	/* A completion arrived, so the queue is making progress —
	 * clears the tx-hang watchdog flag set by the poll worker. */
	txq->axq_tx_inprogress = false;
	if (bf_is_ampdu_not_probing(bf))
		txq->axq_ampdu_depth--;

	if (!bf_isampdu(bf)) {
		ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
		ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
	} else
		ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		ath_txq_schedule(sc, txq);
}
2153
e8324357 2154static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 2155{
cbe61d8a 2156 struct ath_hw *ah = sc->sc_ah;
c46917bb 2157 struct ath_common *common = ath9k_hw_common(ah);
e8324357 2158 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 2159 struct list_head bf_head;
e8324357 2160 struct ath_desc *ds;
29bffa96 2161 struct ath_tx_status ts;
e8324357 2162 int status;
f078f209 2163
d2182b69 2164 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
226afe68
JP
2165 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2166 txq->axq_link);
f078f209 2167
23de5dc9 2168 ath_txq_lock(sc, txq);
f078f209 2169 for (;;) {
236de514
FF
2170 if (work_pending(&sc->hw_reset_work))
2171 break;
2172
f078f209
LR
2173 if (list_empty(&txq->axq_q)) {
2174 txq->axq_link = NULL;
3d4e20f2 2175 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
082f6536 2176 ath_txq_schedule(sc, txq);
f078f209
LR
2177 break;
2178 }
f078f209
LR
2179 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2180
e8324357
S
2181 /*
2182 * There is a race condition that a BH gets scheduled
2183 * after sw writes TxE and before hw re-load the last
2184 * descriptor to get the newly chained one.
2185 * Software must keep the last DONE descriptor as a
2186 * holding descriptor - software does so by marking
2187 * it with the STALE flag.
2188 */
2189 bf_held = NULL;
a119cc49 2190 if (bf->bf_stale) {
e8324357 2191 bf_held = bf;
fce041be 2192 if (list_is_last(&bf_held->list, &txq->axq_q))
e8324357 2193 break;
fce041be
FF
2194
2195 bf = list_entry(bf_held->list.next, struct ath_buf,
2196 list);
f078f209
LR
2197 }
2198
2199 lastbf = bf->bf_lastbf;
e8324357 2200 ds = lastbf->bf_desc;
f078f209 2201
29bffa96
FF
2202 memset(&ts, 0, sizeof(ts));
2203 status = ath9k_hw_txprocdesc(ah, ds, &ts);
fce041be 2204 if (status == -EINPROGRESS)
e8324357 2205 break;
fce041be 2206
2dac4fb9 2207 TX_STAT_INC(txq->axq_qnum, txprocdesc);
f078f209 2208
e8324357
S
2209 /*
2210 * Remove ath_buf's of the same transmit unit from txq,
2211 * however leave the last descriptor back as the holding
2212 * descriptor for hw.
2213 */
a119cc49 2214 lastbf->bf_stale = true;
e8324357 2215 INIT_LIST_HEAD(&bf_head);
e8324357
S
2216 if (!list_is_singular(&lastbf->list))
2217 list_cut_position(&bf_head,
2218 &txq->axq_q, lastbf->list.prev);
f078f209 2219
fce041be 2220 if (bf_held) {
0a8cea84 2221 list_del(&bf_held->list);
0a8cea84 2222 ath_tx_return_buffer(sc, bf_held);
e8324357 2223 }
f078f209 2224
fce041be 2225 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
8469cdef 2226 }
23de5dc9 2227 ath_txq_unlock_complete(sc, txq);
8469cdef
S
2228}
2229
305fe47f 2230static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2231{
2232 struct ath_softc *sc = container_of(work, struct ath_softc,
2233 tx_complete_work.work);
2234 struct ath_txq *txq;
2235 int i;
2236 bool needreset = false;
60f2d1d5
BG
2237#ifdef CONFIG_ATH9K_DEBUGFS
2238 sc->tx_complete_poll_work_seen++;
2239#endif
164ace38
SB
2240
2241 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2242 if (ATH_TXQ_SETUP(sc, i)) {
2243 txq = &sc->tx.txq[i];
23de5dc9 2244 ath_txq_lock(sc, txq);
164ace38
SB
2245 if (txq->axq_depth) {
2246 if (txq->axq_tx_inprogress) {
2247 needreset = true;
23de5dc9 2248 ath_txq_unlock(sc, txq);
164ace38
SB
2249 break;
2250 } else {
2251 txq->axq_tx_inprogress = true;
2252 }
2253 }
23de5dc9 2254 ath_txq_unlock_complete(sc, txq);
164ace38
SB
2255 }
2256
2257 if (needreset) {
d2182b69 2258 ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
226afe68 2259 "tx hung, resetting the chip\n");
030d6294 2260 RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
236de514 2261 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
164ace38
SB
2262 }
2263
42935eca 2264 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2265 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2266}
2267
2268
f078f209 2269
e8324357 2270void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2271{
239c795d
FF
2272 struct ath_hw *ah = sc->sc_ah;
2273 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
e8324357 2274 int i;
f078f209 2275
e8324357
S
2276 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2277 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2278 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2279 }
2280}
2281
/*
 * Tx completion bottom half for EDMA (AR93xx-family) hardware, which
 * reports status through a dedicated status ring rather than in the
 * frame descriptors.  Drains the status ring, routing beacon
 * completions to the beacon state and data completions through
 * ath_tx_process_buffer(); refills the hardware FIFO from axq_q when a
 * FIFO slot empties.
 */
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status ts;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;

	for (;;) {
		/* Bail out if a chip reset is about to tear the queues down */
		if (work_pending(&sc->hw_reset_work))
			break;

		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_dbg(common, XMIT, "Error processing tx status\n");
			break;
		}

		/* Process beacon completions separately */
		if (ts.qid == sc->beacon.beaconq) {
			sc->beacon.tx_processed = true;
			sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
			continue;
		}

		txq = &sc->tx.txq[ts.qid];

		ath_txq_lock(sc, txq);

		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			ath_txq_unlock(sc, txq);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);

		/* FIFO slot drained: advance the tail and push any frames
		 * queued on axq_q into the freed hardware FIFO slot. */
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);

			if (!list_empty(&txq->axq_q)) {
				struct list_head bf_q;

				INIT_LIST_HEAD(&bf_q);
				txq->axq_link = NULL;
				list_splice_tail_init(&txq->axq_q, &bf_q);
				ath_tx_txqaddbuf(sc, txq, &bf_q, true);
			}
		}

		ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
		ath_txq_unlock_complete(sc, txq);
	}
}
2345
e8324357
S
2346/*****************/
2347/* Init, Cleanup */
2348/*****************/
f078f209 2349
5088c2f1
VT
2350static int ath_txstatus_setup(struct ath_softc *sc, int size)
2351{
2352 struct ath_descdma *dd = &sc->txsdma;
2353 u8 txs_len = sc->sc_ah->caps.txs_len;
2354
2355 dd->dd_desc_len = size * txs_len;
2356 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2357 &dd->dd_desc_paddr, GFP_KERNEL);
2358 if (!dd->dd_desc)
2359 return -ENOMEM;
2360
2361 return 0;
2362}
2363
2364static int ath_tx_edma_init(struct ath_softc *sc)
2365{
2366 int err;
2367
2368 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2369 if (!err)
2370 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2371 sc->txsdma.dd_desc_paddr,
2372 ATH_TXSTATUS_RING_SIZE);
2373
2374 return err;
2375}
2376
2377static void ath_tx_edma_cleanup(struct ath_softc *sc)
2378{
2379 struct ath_descdma *dd = &sc->txsdma;
2380
2381 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2382 dd->dd_desc_paddr);
2383}
2384
e8324357 2385int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2386{
c46917bb 2387 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2388 int error = 0;
f078f209 2389
797fe5cb 2390 spin_lock_init(&sc->tx.txbuflock);
f078f209 2391
797fe5cb 2392 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2393 "tx", nbufs, 1, 1);
797fe5cb 2394 if (error != 0) {
3800276a
JP
2395 ath_err(common,
2396 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2397 goto err;
2398 }
f078f209 2399
797fe5cb 2400 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2401 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2402 if (error != 0) {
3800276a
JP
2403 ath_err(common,
2404 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2405 goto err;
2406 }
f078f209 2407
164ace38
SB
2408 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2409
5088c2f1
VT
2410 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2411 error = ath_tx_edma_init(sc);
2412 if (error)
2413 goto err;
2414 }
2415
797fe5cb 2416err:
e8324357
S
2417 if (error != 0)
2418 ath_tx_cleanup(sc);
f078f209 2419
e8324357 2420 return error;
f078f209
LR
2421}
2422
797fe5cb 2423void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2424{
2425 if (sc->beacon.bdma.dd_desc_len != 0)
2426 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2427
2428 if (sc->tx.txdma.dd_desc_len != 0)
2429 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2430
2431 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2432 ath_tx_edma_cleanup(sc);
e8324357 2433}
f078f209
LR
2434
2435void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2436{
c5170163
S
2437 struct ath_atx_tid *tid;
2438 struct ath_atx_ac *ac;
2439 int tidno, acno;
f078f209 2440
8ee5afbc 2441 for (tidno = 0, tid = &an->tid[tidno];
c5170163
S
2442 tidno < WME_NUM_TID;
2443 tidno++, tid++) {
2444 tid->an = an;
2445 tid->tidno = tidno;
2446 tid->seq_start = tid->seq_next = 0;
2447 tid->baw_size = WME_MAX_BA;
2448 tid->baw_head = tid->baw_tail = 0;
2449 tid->sched = false;
e8324357 2450 tid->paused = false;
a37c2c79 2451 tid->state &= ~AGGR_CLEANUP;
56dc6336 2452 __skb_queue_head_init(&tid->buf_q);
c5170163 2453 acno = TID_TO_WME_AC(tidno);
8ee5afbc 2454 tid->ac = &an->ac[acno];
a37c2c79
S
2455 tid->state &= ~AGGR_ADDBA_COMPLETE;
2456 tid->state &= ~AGGR_ADDBA_PROGRESS;
c5170163 2457 }
f078f209 2458
8ee5afbc 2459 for (acno = 0, ac = &an->ac[acno];
c5170163
S
2460 acno < WME_NUM_AC; acno++, ac++) {
2461 ac->sched = false;
066dae93 2462 ac->txq = sc->tx.txq_map[acno];
c5170163 2463 INIT_LIST_HEAD(&ac->tid_q);
f078f209
LR
2464 }
2465}
2466
b5aa9bf9 2467void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2468{
2b40994c
FF
2469 struct ath_atx_ac *ac;
2470 struct ath_atx_tid *tid;
f078f209 2471 struct ath_txq *txq;
066dae93 2472 int tidno;
e8324357 2473
2b40994c
FF
2474 for (tidno = 0, tid = &an->tid[tidno];
2475 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2476
2b40994c 2477 ac = tid->ac;
066dae93 2478 txq = ac->txq;
f078f209 2479
23de5dc9 2480 ath_txq_lock(sc, txq);
2b40994c
FF
2481
2482 if (tid->sched) {
2483 list_del(&tid->list);
2484 tid->sched = false;
2485 }
2486
2487 if (ac->sched) {
2488 list_del(&ac->list);
2489 tid->ac->sched = false;
f078f209 2490 }
2b40994c
FF
2491
2492 ath_tid_drain(sc, txq, tid);
2493 tid->state &= ~AGGR_ADDBA_COMPLETE;
2494 tid->state &= ~AGGR_CLEANUP;
2495
23de5dc9 2496 ath_txq_unlock(sc, txq);
f078f209
LR
2497 }
2498}
This page took 0.767624 seconds and 5 git commands to generate.