mac80211: fix RX aggregation locking
[deliverable/linux.git] drivers/net/wireless/ath/ath9k/xmit.c
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

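/*
 * Worked example for the two symbol-time macros above: an HT OFDM symbol
 * lasts 4 us with the full guard interval and 3.6 us with the short GI.
 * The "+ 4" in SYMBOL_TIME_HALFGI() makes the integer division round up
 * to a whole microsecond, e.g. SYMBOL_TIME_HALFGI(7) = (7 * 18 + 4) / 5
 * = 26, the ceiling of 7 * 3.6 = 25.2 us.
 */
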
static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
                             int nframes, int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
        [MCS_HT20] = {
                3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
                6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
                9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
                12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
        },
        [MCS_HT20_SGI] = {
                3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
                7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
                10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
                14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40] = {
                6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
                13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
                20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
                26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
        },
        [MCS_HT40_SGI] = {
                7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
                14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
                22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
                29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
        }
};

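/*
 * Each entry above is roughly (PHY rate in bits per us) * 4000 us / 8,
 * capped just under the 16-bit descriptor length limit. For example,
 * MCS 0 at 20 MHz runs at 6.5 Mbit/s, and 6.5 * 4000 / 8 = 3250 bytes,
 * in line with the 3212-byte first entry once framing overhead is
 * allowed for; 65532 marks rates fast enough to hit the hardware cap.
 */
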
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;

        WARN_ON(!tid->paused);

        spin_lock_bh(&txq->axq_lock);
        tid->paused = false;

        if (list_empty(&tid->buf_q))
                goto unlock;

        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
                     sizeof(tx_info->rate_driver_data));
        return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = tid->ac->txq;
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        INIT_LIST_HEAD(&bf_head);

        memset(&ts, 0, sizeof(ts));
        spin_lock_bh(&txq->axq_lock);

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                spin_unlock_bh(&txq->axq_lock);
                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries) {
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                } else {
                        ath_tx_send_normal(sc, txq, tid, &bf_head);
                }
                spin_lock_bh(&txq->axq_lock);
        }

        spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        __clear_bit(cindex, tid->tx_buf);

        while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                             u16 seqno)
{
        int index, cindex;

        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);

        if (index >= ((tid->baw_tail - tid->baw_head) &
            (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

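/*
 * Example of the block-ack window bookkeeping above: with
 * tid->seq_start = 100 and tid->baw_head = 0, a subframe with seqno 103
 * gets index ATH_BA_INDEX(100, 103) = 3 and occupies bit 3 of
 * tid->tx_buf. Completions can arrive out of order, so
 * ath_tx_update_baw() only advances seq_start/baw_head across a run of
 * already-cleared bits; the window never slides past a subframe that is
 * still outstanding.
 */
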
/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        struct list_head bf_head;
        struct ath_tx_status ts;
        struct ath_frame_info *fi;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;

                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                list_move_tail(&bf->list, &bf_head);

                fi = get_frame_info(bf->bf_mpdu);
                if (fi->retries)
                        ath_tx_update_baw(sc, tid, fi->seqno);

                spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                spin_lock(&txq->axq_lock);
        }

        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
{
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ieee80211_hdr *hdr;

        TX_STAT_INC(txq->axq_qnum, a_retries);
        if (fi->retries++ > 0)
                return;

        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->tx.txbuflock);

        if (unlikely(list_empty(&sc->tx.txbuf))) {
                spin_unlock_bh(&sc->tx.txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->tx.txbuflock);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        spin_lock_bh(&sc->tx.txbuflock);
        list_add_tail(&bf->list, &sc->tx.txbuf);
        spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf *ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_buf *tbf;

        tbf = ath_tx_get_buffer(sc);
        if (WARN_ON(!tbf))
                return NULL;

        ATH_TXBUF_RESET(tbf);

        tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
        tbf->bf_state = bf->bf_state;

        return tbf;
}

static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_tx_status *ts, int txok,
                                int *nframes, int *nbad)
{
        struct ath_frame_info *fi;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int isaggr = 0;

        *nbad = 0;
        *nframes = 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ts->ts_seqnum;
                memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                fi = get_frame_info(bf->bf_mpdu);
                ba_index = ATH_BA_INDEX(seq_st, fi->seqno);

                (*nframes)++;
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        (*nbad)++;

                bf = bf->bf_next;
        }
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
                                 struct ath_tx_status *ts, int txok, bool retry)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
        struct ieee80211_hw *hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
        bool rc_update = true;
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
        u8 tidno;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;

        tx_info = IEEE80211_SKB_CB(skb);
        hw = bf->aphy->hw;

        memcpy(rates, tx_info->control.rates, sizeof(rates));

        rcu_read_lock();

        sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
                rcu_read_unlock();

                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;

                        bf->bf_state.bf_type |= BUF_XRETRY;
                        if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);

                        ath_tx_rc_status(bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            0, 0);

                        bf = bf_next;
                }
                return;
        }

        an = (struct ath_node *)sta->drv_priv;
        tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
        tid = ATH_AN_2_TID(an, tidno);

        /*
         * The hardware occasionally sends a tx status for the wrong TID.
         * In this case, the BA status cannot be considered valid and all
         * subframes need to be retransmitted
         */
        if (tidno != ts->tid)
                txok = false;

        isaggr = bf_isaggr(bf);
        memset(ba, 0, WME_BA_BMP_SIZE >> 3);

        if (isaggr && txok) {
                if (ts->ts_flags & ATH9K_TX_BA) {
                        seq_st = ts->ts_seqnum;
                        memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
                } else {
                        /*
                         * AR5416 can become deaf/mute when BA
                         * issue happens. Chip needs to be reset.
                         * But AP code may have synchronization issues
                         * when performing internal reset in this routine.
                         * Only enable reset in STA mode for now.
                         */
                        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
                                needreset = 1;
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                skb = bf->bf_mpdu;
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                        acked_cnt++;
                } else if (!isaggr && txok) {
                        /* transmit completion */
                        acked_cnt++;
                } else {
                        if (!(tid->state & AGGR_CLEANUP) && retry) {
                                if (fi->retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, txq, bf->bf_mpdu);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                        txfail_cnt++;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }

                if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
                    bf_next == NULL) {
                        /*
                         * Make sure the last desc is reclaimed if it
                         * not a holding desc.
                         */
                        if (!bf_last->bf_stale)
                                list_move_tail(&bf->list, &bf_head);
                        else
                                INIT_LIST_HEAD(&bf_head);
                } else {
                        BUG_ON(list_empty(bf_q));
                        list_move_tail(&bf->list, &bf_head);
                }

                if (!txpending || (tid->state & AGGR_CLEANUP)) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
                                ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
                        }

                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                            !txfail, sendbar);
                } else {
                        /* retry the un-acked ones */
                        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
                                if (bf->bf_next == NULL && bf_last->bf_stale) {
                                        struct ath_buf *tbf;

                                        tbf = ath_clone_txbuf(sc, bf_last);
                                        /*
                                         * Update tx baw and complete the
                                         * frame with failed status if we
                                         * run out of tx buf.
                                         */
                                        if (!tbf) {
                                                spin_lock_bh(&txq->axq_lock);
                                                ath_tx_update_baw(sc, tid, fi->seqno);
                                                spin_unlock_bh(&txq->axq_lock);

                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
                                                ath_tx_rc_status(bf, ts, nframes,
                                                                 nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
                                                                    ts, 0, 0);
                                                break;
                                        }

                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             tbf->bf_desc);
                                        list_add_tail(&tbf->list, &bf_head);
                                } else {
                                        /*
                                         * Clear descriptor status words for
                                         * software retry
                                         */
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                             bf->bf_desc);
                                }
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (tid->state & AGGR_CLEANUP) {
                ath_tx_flush_tid(sc, tid);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->state &= ~AGGR_CLEANUP;
                }
        }

        rcu_read_unlock();

        if (needreset)
                ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        int modeidx;
                        if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
                                legacy = 1;
                                break;
                        }

                        if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                                modeidx = MCS_HT40;
                        else
                                modeidx = MCS_HT20;

                        if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                                modeidx++;

                        frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
                        max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }

        /*
         * Limit aggregate size by the minimum rate if the selected rate is
         * not a probe rate; if the selected rate is a probe rate, avoid
         * aggregation of this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
                aggr_limit = min(max_4ms_framelen,
                                 (u32)ATH_AMPDU_LIMIT_MAX);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        if (tid->an->maxampdu)
                aggr_limit = min(aggr_limit, tid->an->maxampdu);

        return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
                                  struct ath_buf *bf, u16 frmlen)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols;
        u16 minlen;
        u8 flags, rix;
        int width, streams, half_gi, ndelim, mindelim;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption is enabled, hardware requires some more padding
         * between subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if (fi->keyix != ATH9K_TXKEYIX_INVALID)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40 MHz and half or full GI.
         *
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */

        if (tid->an->mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        streams = HT_RC_2_STREAMS(rix);
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        if (frmlen < minlen) {
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}

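/*
 * Example of the density calculation above, assuming an MPDU density of
 * 8 us and MCS 4 (16-QAM 3/4, one stream) at 20 MHz with full GI:
 * nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2, nsymbits = 156 * 1, so
 * minlen = 2 * 156 / 8 = 39 bytes. A subframe shorter than that gets
 * (minlen - frmlen) / ATH_AGGR_DELIM_SZ extra delimiters to stretch it
 * out on the air.
 */
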
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_txq *txq,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *bf_first, *bf_prev = NULL;
        int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                fi = get_frame_info(bf->bf_mpdu);

                /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;

                if (nframes &&
                    (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
                if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
                    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
                        break;

                /* do not exceed subframe limit */
                if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
                nframes++;

                /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
                        ath_tx_addto_baw(sc, tid, fi->seqno);
                ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
                list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
                                               bf->bf_daddr);
                }
                bf_prev = bf;

        } while (!list_empty(&tid->buf_q));

        *aggr_len = al;

        return status;
#undef PADBYTES
}

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
                              struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct ath_frame_info *fi;
        struct list_head bf_q;
        int aggr_len;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);

                /*
                 * no frames picked up to be aggregated;
                 * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

                /* if only one frame, send as non-aggregate */
                if (bf == bf->bf_lastbf) {
                        fi = get_frame_info(bf->bf_mpdu);

                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf, fi->framelen);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /* setup first desc of aggregate */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf, aggr_len);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);

                /* anchor last desc of aggregate */
                ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

                ath_tx_txqaddbuf(sc, txq, &bf_q);
                TX_STAT_INC(txq->axq_qnum, a_aggr);

        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);

        if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
                return -EAGAIN;

        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
        *ssn = txtid->seq_start;

        return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
        struct ath_txq *txq = txtid->ac->txq;

        if (txtid->state & AGGR_CLEANUP)
                return;

        if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                return;
        }

        spin_lock_bh(&txq->axq_lock);
        txtid->paused = true;

        /*
         * If frames are still being transmitted for this TID, they will be
         * cleaned up during tx completion. To prevent race conditions, this
         * TID can only be reused after all in-progress subframes have been
         * completed.
         */
        if (txtid->baw_head != txtid->baw_tail)
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
        spin_unlock_bh(&txq->axq_lock);

        ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
        struct ath_atx_tid *txtid;
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR) {
                txtid = ATH_AN_2_TID(an, tid);
                txtid->baw_size =
                        IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
                txtid->state |= AGGR_ADDBA_COMPLETE;
                txtid->state &= ~AGGR_ADDBA_PROGRESS;
                ath_tx_resume_tid(sc, txtid);
        }
}

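/*
 * Lifecycle of a TX aggregation session, as implemented above:
 * ath_tx_aggr_start() pauses the TID and hands mac80211 the starting
 * sequence number for the ADDBA request; once the peer responds,
 * ath_tx_aggr_resume() sizes the BAW from the peer's ampdu_factor and
 * unpauses the TID; ath_tx_aggr_stop() either tears the state down
 * immediately or, if subframes are still in flight, defers the cleanup
 * to tx completion via AGGR_CLEANUP.
 */
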
/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
                [WME_AC_BK] = ATH_TXQ_AC_BK,
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
        int qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise waiting for the
         * EOL to reap descriptors. Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames. Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up in which case the top half of the kernel may back up
         * due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
                                TXQ_FLAG_TXERRINT_ENABLE;
        } else {
                if (qtype == ATH9K_TX_QUEUE_UAPSD)
                        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
                else
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
        qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_print(common, ATH_DBG_FATAL,
                          "qnum %u out of range, max %u!\n",
                          qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
                struct ath_txq *txq = &sc->tx.txq[qnum];

                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_tx_inprogress = false;
                sc->tx.txqsetup |= 1<<qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
        struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;

        if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
                sc->beacon.beacon_qi = *qinfo;
                return 0;
        }

        BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
        qi.tqi_cwmin = qinfo->tqi_cwmin;
        qi.tqi_cwmax = qinfo->tqi_cwmax;
        qi.tqi_burstTime = qinfo->tqi_burstTime;
        qi.tqi_readyTime = qinfo->tqi_readyTime;

        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
                ath9k_hw_resettxqueue(ah, qnum);
        }

        return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
        struct ath9k_tx_queue_info qi;
        int qnum = sc->beacon.cabq->axq_qnum;

        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
        if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
        else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
                sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

        qi.tqi_readyTime = (sc->beacon_interval *
                            sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);

        return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
                                txq->txq_headidx = txq->txq_tailidx = 0;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
                                                      struct ath_buf, list);
                        }
                } else {
                        if (list_empty(&txq->axq_q)) {
                                txq->axq_link = NULL;
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        }
                        bf = list_first_entry(&txq->axq_q, struct ath_buf,
                                              list);

                        if (bf->bf_stale) {
                                list_del(&bf->list);
                                spin_unlock_bh(&txq->axq_lock);

                                ath_tx_return_buffer(sc, bf);
                                continue;
                        }
                }

                lastbf = bf->bf_lastbf;

                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo[txq->txq_tailidx],
                                          &lastbf->list);
                        INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
                } else {
                        /* remove ath_buf's of the same mpdu from txq */
                        list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
                }

                txq->axq_depth--;

                spin_unlock_bh(&txq->axq_lock);

                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        spin_lock_bh(&txq->axq_lock);
        txq->axq_tx_inprogress = false;
        spin_unlock_bh(&txq->axq_lock);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                spin_lock_bh(&txq->axq_lock);
                while (!list_empty(&txq->txq_fifo_pending)) {
                        bf = list_first_entry(&txq->txq_fifo_pending,
                                              struct ath_buf, list);
                        list_cut_position(&bf_head,
                                          &txq->txq_fifo_pending,
                                          &bf->bf_lastbf->list);
                        spin_unlock_bh(&txq->axq_lock);

                        if (bf_isampdu(bf))
                                ath_tx_complete_aggr(sc, txq, bf, &bf_head,
                                                     &ts, 0, retry_tx);
                        else
                                ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                                    &ts, 0, 0);
                        spin_lock_bh(&txq->axq_lock);
                }
                spin_unlock_bh(&txq->axq_lock);
        }

        /* flush any pending frames if aggregation is enabled */
        if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
                        ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return;

        /* Stop beacon queue */
        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

        /* Stop data queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        ath9k_hw_stoptxdma(ah, txq->axq_qnum);
                        npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
                }
        }

        if (npend) {
                int r;

                ath_print(common, ATH_DBG_FATAL,
                          "Failed to stop TX DMA. Resetting hardware!\n");

                r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
                                  r);
        }

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
        }
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;

        if (list_empty(&txq->axq_acq))
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        list_del(&ac->list);
        ac->sched = false;

        do {
                if (list_empty(&ac->tid_q))
                        return;

                tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
                list_del(&tid->list);
                tid->sched = false;

                if (tid->paused)
                        continue;

                ath_tx_sched_aggr(sc, txq, tid);

                /*
                 * add tid to round-robin queue if more frames
                 * are pending for the tid
                 */
                if (!list_empty(&tid->buf_q))
                        ath_tx_queue_tid(txq, tid);

                break;
        } while (!list_empty(&ac->tid_q));

        if (!list_empty(&ac->tid_q)) {
                if (!ac->sched) {
                        ac->sched = true;
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        ath_print(common, ATH_DBG_QUEUE,
                  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
                        list_splice_tail_init(head, &txq->txq_fifo_pending);
                        return;
                }
                if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
                        ath_print(common, ATH_DBG_XMIT,
                                  "Initializing tx fifo %d which "
                                  "is non-empty\n",
                                  txq->txq_headidx);
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_print(common, ATH_DBG_XMIT,
                          "TXDP[%u] = %llx (%p)\n",
                          txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                list_splice_tail_init(head, &txq->axq_q);

                if (txq->axq_link == NULL) {
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_print(common, ATH_DBG_XMIT,
                                  "TXDP[%u] = %llx (%p)\n",
                                  txq->axq_qnum, ito64(bf->bf_daddr),
                                  bf->bf_desc);
                } else {
                        *txq->axq_link = bf->bf_daddr;
                        ath_print(common, ATH_DBG_XMIT,
                                  "link[%u] (%p)=%llx (%p)\n",
                                  txq->axq_qnum, txq->axq_link,
                                  ito64(bf->bf_daddr), bf->bf_desc);
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
                              struct ath_buf *bf, struct ath_tx_control *txctl)
{
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
        struct list_head bf_head;

        bf->bf_state.bf_type |= BUF_AMPDU;
        TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
            txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                list_add_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
        }

        INIT_LIST_HEAD(&bf_head);
        list_add(&bf->list, &bf_head);

        /* Add sub-frame to BAW */
        if (!fi->retries)
                ath_tx_addto_baw(sc, tid, fi->seqno);

        /* Queue to h/w without aggregation */
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head)
{
        struct ath_frame_info *fi;
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU;

        /* update starting sequence number for subsequent ADDBA request */
        if (tid)
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        bf->bf_lastbf = bf;
        fi = get_frame_info(bf->bf_mpdu);
        ath_buf_set_rate(sc, bf, fi->framelen);
        ath_tx_txqaddbuf(sc, txq, bf_head);
        TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                             int framelen)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_node *an;
        struct ath_atx_tid *tid;
        enum ath9k_key_type keytype;
        u16 seqno = 0;
        u8 tidno;

        keytype = ath9k_cmn_get_hw_crypto_keytype(skb);

        hdr = (struct ieee80211_hdr *)skb->data;
        if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
            conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {

                an = (struct ath_node *) sta->drv_priv;
                tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;

                /*
                 * Override seqno set by upper layer with the one
                 * in tx aggregation state.
                 */
                tid = ATH_AN_2_TID(an, tidno);
                seqno = tid->seq_next;
                hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
                INCR(tid->seq_next, IEEE80211_SEQ_MAX);
        }

        memset(fi, 0, sizeof(*fi));
        if (hw_key)
                fi->keyix = hw_key->hw_key_idx;
        else
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
        fi->seqno = seqno;
}

static int setup_tx_flags(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;

        flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;

        if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
                flags |= ATH9K_TXDESC_LDPC;

        return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
                            int width, int half_gi, bool shortPreamble)
{
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;

        /* find number of symbols: PLCP + data */
        streams = HT_RC_2_STREAMS(rix);
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[rix % 8][width] * streams;
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}

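/*
 * Example: a 1500-byte MPDU at MCS 5 (64-QAM 2/3, one stream), 20 MHz,
 * full GI gives nbits = 1500 * 8 + 22 = 12022 and nsymbits = 208, so
 * nsymbols = ceil(12022 / 208) = 58 and the data portion lasts
 * SYMBOL_TIME(58) = 232 us, plus 36 us of preamble/training fields
 * (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)).
 */
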
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath9k_11n_rate_series series[4];
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        int i, flags = 0;
        u8 rix = 0, ctsrate = 0;
        bool is_pspoll;

        memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

        skb = bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        hdr = (struct ieee80211_hdr *)skb->data;
        is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
        ctsrate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                ctsrate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                bool is_40, is_sgi, is_sp;
                int phy;

                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;
                series[i].Tries = rates[i].count;
                series[i].ChSel = common->tx_chainmask;

                if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
                    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_CTSENA;
                }

                if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        series[i].RateFlags |= ATH9K_RATESERIES_2040;
                if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
                        series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

                is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
                is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
                is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                if (rates[i].flags & IEEE80211_TX_RC_MCS) {
                        /* MCS rates */
                        series[i].Rate = rix | 0x80;
                        series[i].PktDuration = ath_pkt_duration(sc, rix, len,
                                                                 is_40, is_sgi, is_sp);
                        if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
                                series[i].RateFlags |= ATH9K_RATESERIES_STBC;
                        continue;
                }

                /* legacy rates */
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;

                rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                series[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                series[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = false;
                }

                series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (flags & ATH9K_TXDESC_RTSENA)
                flags &= ~ATH9K_TXDESC_CTSENA;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
                                     bf->bf_lastbf->bf_desc,
                                     !is_pspoll, ctsrate,
                                     0, series, 4, flags);

        if (sc->config.ath_aggr_prot && flags)
                ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
                                           struct ath_txq *txq,
                                           struct sk_buff *skb)
{
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct ath_buf *bf;
        struct ath_desc *ds;
        int frm_type;

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
                return NULL;
        }

        ATH_TXBUF_RESET(bf);

        bf->aphy = aphy;
        bf->bf_flags = setup_tx_flags(skb);
        bf->bf_mpdu = skb;

        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                         skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
                bf->bf_mpdu = NULL;
                bf->bf_buf_addr = 0;
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
                          "dma_mapping_error() on TX\n");
                ath_tx_return_buffer(sc, bf);
                return NULL;
        }

        frm_type = get_hw_packet_type(skb);

        ds = bf->bf_desc;
        ath9k_hw_set_desc_link(ah, ds, 0);

        ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
                               fi->keyix, fi->keytype, bf->bf_flags);

        ath9k_hw_filltxdesc(ah, ds,
                            skb->len,   /* segment length */
                            true,       /* first segment */
                            true,       /* last segment */
                            ds,         /* first descriptor */
                            bf->bf_buf_addr,
                            txq->axq_qnum);


        return bf;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_control *txctl)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct list_head bf_head;
        struct ath_atx_tid *tid;
        u8 tidno;

        spin_lock_bh(&txctl->txq->axq_lock);

        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && txctl->an) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
                        IEEE80211_QOS_CTL_TID_MASK;
                tid = ATH_AN_2_TID(txctl->an, tidno);

                WARN_ON(tid->ac->txq != txctl->txq);
                /*
                 * Try aggregation if it's a unicast data frame
                 * and the destination is HT capable.
                 */
                ath_tx_send_ampdu(sc, tid, bf, txctl);
        } else {
                INIT_LIST_HEAD(&bf_head);
                list_add_tail(&bf->list, &bf_head);

                bf->bf_state.bfs_ftype = txctl->frame_type;
                bf->bf_state.bfs_paprd = txctl->paprd;

                if (bf->bf_state.bfs_paprd)
                        ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
                                                   bf->bf_state.bfs_paprd);

                ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
        }

        spin_unlock_bh(&txctl->txq->axq_lock);
}

f8316df1 1714/* Upon failure caller should free skb */
c52f33d0 1715int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
528f0c6b 1716 struct ath_tx_control *txctl)
f078f209 1717{
28d16708
FF
1718 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2d42efc4 1720 struct ieee80211_sta *sta = info->control.sta;
c52f33d0
JM
1721 struct ath_wiphy *aphy = hw->priv;
1722 struct ath_softc *sc = aphy->sc;
84642d6b 1723 struct ath_txq *txq = txctl->txq;
528f0c6b 1724 struct ath_buf *bf;
4d91f9f3 1725 int padpos, padsize;
04caf863 1726 int frmlen = skb->len + FCS_LEN;
28d16708 1727 int q;
f078f209 1728
2d42efc4 1729 txctl->an = (struct ath_node *)sta->drv_priv;
04caf863
FF
1730 if (info->control.hw_key)
1731 frmlen += info->control.hw_key->icv_len;
1732
f078f209 1733 /*
e8324357
S
1734 * As a temporary workaround, assign seq# here; this will likely need
1735 * to be cleaned up to work better with Beacon transmission and virtual
1736 * BSSes.
f078f209 1737 */
e8324357 1738 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
e8324357
S
1739 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1740 sc->tx.seq_no += 0x10;
1741 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1742 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
f078f209 1743 }
f078f209 1744
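	/*
	 * Descriptive note (inferred): 802.11 headers whose length is not a
	 * multiple of 4 would leave the frame body misaligned; the hardware
	 * expects the body to start on a 4-byte boundary, so the header is
	 * pushed back and pad bytes inserted between header and payload.
	 */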
	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -ENOMEM;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	setup_frame_info(hw, skb, frmlen);

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	spin_lock_bh(&txq->axq_lock);
	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, q);
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/*****************/
/* TX Completion */
/*****************/

e8324357 1785static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
61117f01 1786 struct ath_wiphy *aphy, int tx_flags, int ftype,
066dae93 1787 struct ath_txq *txq)
528f0c6b 1788{
e8324357
S
1789 struct ieee80211_hw *hw = sc->hw;
1790 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
c46917bb 1791 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
4d91f9f3 1792 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
97923b14 1793 int q, padpos, padsize;
528f0c6b 1794
c46917bb 1795 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
528f0c6b 1796
827e69bf
FF
1797 if (aphy)
1798 hw = aphy->hw;
528f0c6b 1799
6b2c4032 1800 if (tx_flags & ATH_TX_BAR)
e8324357 1801 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
e8324357 1802
6b2c4032 1803 if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
e8324357
S
1804 /* Frame was ACKed */
1805 tx_info->flags |= IEEE80211_TX_STAT_ACK;
528f0c6b
S
1806 }
1807
4d91f9f3
BP
1808 padpos = ath9k_cmn_padpos(hdr->frame_control);
1809 padsize = padpos & 3;
1810 if (padsize && skb->len>padpos+padsize) {
e8324357
S
1811 /*
1812 * Remove MAC header padding before giving the frame back to
1813 * mac80211.
1814 */
4d91f9f3 1815 memmove(skb->data + padsize, skb->data, padpos);
e8324357
S
1816 skb_pull(skb, padsize);
1817 }
528f0c6b 1818
1b04b930
S
1819 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1820 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
c46917bb
LR
1821 ath_print(common, ATH_DBG_PS,
1822 "Going back to sleep after having "
f643e51d 1823 "received TX status (0x%lx)\n",
1b04b930
S
1824 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1825 PS_WAIT_FOR_CAB |
1826 PS_WAIT_FOR_PSPOLL_DATA |
1827 PS_WAIT_FOR_TX_ACK));
9a23f9ca
JM
1828 }
1829
61117f01
FF
1830 if (unlikely(ftype))
1831 ath9k_tx_status(hw, skb, ftype);
97923b14
FF
1832 else {
1833 q = skb_get_queue_mapping(skb);
066dae93
FF
1834 if (txq == sc->tx.txq_map[q]) {
1835 spin_lock_bh(&txq->axq_lock);
1836 if (WARN_ON(--txq->pending_frames < 0))
1837 txq->pending_frames = 0;
1838 spin_unlock_bh(&txq->axq_lock);
1839 }
97923b14 1840
827e69bf 1841 ieee80211_tx_status(hw, skb);
97923b14 1842 }
e8324357 1843}
f078f209 1844
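/*
 * Descriptive note (inferred from the function body below): release the
 * resources attached to a completed frame - unmap the DMA buffer, hand
 * the skb back (or complete PAPRD calibration for internally generated
 * training frames) and return the ath_buf list to the free pool.
 */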
e8324357 1845static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
db1a052b
FF
1846 struct ath_txq *txq, struct list_head *bf_q,
1847 struct ath_tx_status *ts, int txok, int sendbar)
f078f209 1848{
e8324357 1849 struct sk_buff *skb = bf->bf_mpdu;
e8324357 1850 unsigned long flags;
6b2c4032 1851 int tx_flags = 0;
f078f209 1852
e8324357 1853 if (sendbar)
6b2c4032 1854 tx_flags = ATH_TX_BAR;
f078f209 1855
e8324357 1856 if (!txok) {
6b2c4032 1857 tx_flags |= ATH_TX_ERROR;
f078f209 1858
e8324357 1859 if (bf_isxretried(bf))
6b2c4032 1860 tx_flags |= ATH_TX_XRETRY;
f078f209
LR
1861 }
1862
c1739eb3 1863 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
6cf9e995 1864 bf->bf_buf_addr = 0;
9f42c2b6
FF
1865
1866 if (bf->bf_state.bfs_paprd) {
82259b77 1867 if (!sc->paprd_pending)
ca369eb4 1868 dev_kfree_skb_any(skb);
78a18172 1869 else
ca369eb4 1870 complete(&sc->paprd_complete);
9f42c2b6 1871 } else {
066dae93 1872 ath_debug_stat_tx(sc, bf, ts);
61117f01
FF
1873 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1874 bf->bf_state.bfs_ftype, txq);
9f42c2b6 1875 }
6cf9e995
BG
1876 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1877 * accidentally reference it later.
1878 */
1879 bf->bf_mpdu = NULL;
e8324357
S
1880
1881 /*
1882 * Return the list of ath_buf of this mpdu to free queue
1883 */
1884 spin_lock_irqsave(&sc->tx.txbuflock, flags);
1885 list_splice_tail_init(bf_q, &sc->tx.txbuf);
1886 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
f078f209
LR
1887}
1888
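/*
 * Descriptive note (inferred from the function body below): translate
 * the hardware tx status into the rate-control feedback mac80211 expects
 * in the skb's ieee80211_tx_info - A-MPDU frame counts, filtered-frame
 * status and per-rate try counts.
 */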
db1a052b 1889static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
b572d033 1890 int nframes, int nbad, int txok, bool update_rc)
f078f209 1891{
a22be22a 1892 struct sk_buff *skb = bf->bf_mpdu;
254ad0ff 1893 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e8324357 1894 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
827e69bf 1895 struct ieee80211_hw *hw = bf->aphy->hw;
f0c255a0
FF
1896 struct ath_softc *sc = bf->aphy->sc;
1897 struct ath_hw *ah = sc->sc_ah;
8a92e2ee 1898 u8 i, tx_rateindex;
f078f209 1899
95e4acb7 1900 if (txok)
db1a052b 1901 tx_info->status.ack_signal = ts->ts_rssi;
95e4acb7 1902
db1a052b 1903 tx_rateindex = ts->ts_rateindex;
8a92e2ee
VT
1904 WARN_ON(tx_rateindex >= hw->max_rates);
1905
db1a052b 1906 if (ts->ts_status & ATH9K_TXERR_FILT)
e8324357 1907 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ebd02287 1908 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
d969847c 1909 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
f078f209 1910
b572d033 1911 BUG_ON(nbad > nframes);
ebd02287 1912
b572d033
FF
1913 tx_info->status.ampdu_len = nframes;
1914 tx_info->status.ampdu_ack_len = nframes - nbad;
ebd02287
BS
1915 }
1916
db1a052b 1917 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
8a92e2ee 1918 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
f0c255a0
FF
1919 /*
1920 * If an underrun error is seen assume it as an excessive
1921 * retry only if max frame trigger level has been reached
1922 * (2 KB for single stream, and 4 KB for dual stream).
1923 * Adjust the long retry as if the frame was tried
1924 * hw->max_rate_tries times to affect how rate control updates
1925 * PER for the failed rate.
1926 * In case of congestion on the bus penalizing this type of
1927 * underruns should help hardware actually transmit new frames
1928 * successfully by eventually preferring slower rates.
1929 * This itself should also alleviate congestion on the bus.
1930 */
1931 if (ieee80211_is_data(hdr->frame_control) &&
1932 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1933 ATH9K_TX_DELIM_UNDERRUN)) &&
1934 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1935 tx_info->status.rates[tx_rateindex].count =
1936 hw->max_rate_tries;
f078f209 1937 }
8a92e2ee 1938
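	/*
	 * Descriptive note (inferred): rate-series entries after the one the
	 * frame finally went out with were never tried; invalidate them so
	 * rate control only accounts for rates that were actually used.
	 */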
	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}

066dae93 1947static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
059d806c 1948{
066dae93 1949 struct ath_txq *txq;
97923b14 1950
066dae93 1951 txq = sc->tx.txq_map[qnum];
059d806c 1952 spin_lock_bh(&txq->axq_lock);
066dae93 1953 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
68e8f2fa
VT
1954 if (ath_mac80211_start_queue(sc, qnum))
1955 txq->stopped = 0;
059d806c
S
1956 }
1957 spin_unlock_bh(&txq->axq_lock);
1958}
1959
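/*
 * Descriptive note (inferred from the function body below): reap
 * completed frames from a hardware tx queue - walk the descriptor chain,
 * hand finished buffers to the aggregation or single-frame completion
 * path and reschedule the queue. The last DONE descriptor is kept around
 * as a holding descriptor (see the STALE handling below).
 */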
e8324357 1960static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
f078f209 1961{
cbe61d8a 1962 struct ath_hw *ah = sc->sc_ah;
c46917bb 1963 struct ath_common *common = ath9k_hw_common(ah);
e8324357 1964 struct ath_buf *bf, *lastbf, *bf_held = NULL;
f078f209 1965 struct list_head bf_head;
e8324357 1966 struct ath_desc *ds;
29bffa96 1967 struct ath_tx_status ts;
0934af23 1968 int txok;
e8324357 1969 int status;
066dae93 1970 int qnum;
f078f209 1971
c46917bb
LR
1972 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1973 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1974 txq->axq_link);
f078f209 1975
f078f209
LR
1976 for (;;) {
1977 spin_lock_bh(&txq->axq_lock);
f078f209
LR
1978 if (list_empty(&txq->axq_q)) {
1979 txq->axq_link = NULL;
f078f209
LR
1980 spin_unlock_bh(&txq->axq_lock);
1981 break;
1982 }
f078f209
LR
1983 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1984
e8324357
S
1985 /*
1986 * There is a race condition that a BH gets scheduled
1987 * after sw writes TxE and before hw re-load the last
1988 * descriptor to get the newly chained one.
1989 * Software must keep the last DONE descriptor as a
1990 * holding descriptor - software does so by marking
1991 * it with the STALE flag.
1992 */
1993 bf_held = NULL;
a119cc49 1994 if (bf->bf_stale) {
e8324357
S
1995 bf_held = bf;
1996 if (list_is_last(&bf_held->list, &txq->axq_q)) {
6ef9b13d 1997 spin_unlock_bh(&txq->axq_lock);
e8324357
S
1998 break;
1999 } else {
2000 bf = list_entry(bf_held->list.next,
6ef9b13d 2001 struct ath_buf, list);
e8324357 2002 }
f078f209
LR
2003 }
2004
2005 lastbf = bf->bf_lastbf;
e8324357 2006 ds = lastbf->bf_desc;
f078f209 2007
29bffa96
FF
2008 memset(&ts, 0, sizeof(ts));
2009 status = ath9k_hw_txprocdesc(ah, ds, &ts);
e8324357 2010 if (status == -EINPROGRESS) {
f078f209 2011 spin_unlock_bh(&txq->axq_lock);
e8324357 2012 break;
f078f209 2013 }
f078f209 2014
e8324357
S
2015 /*
2016 * Remove ath_buf's of the same transmit unit from txq,
2017 * however leave the last descriptor back as the holding
2018 * descriptor for hw.
2019 */
a119cc49 2020 lastbf->bf_stale = true;
e8324357 2021 INIT_LIST_HEAD(&bf_head);
e8324357
S
2022 if (!list_is_singular(&lastbf->list))
2023 list_cut_position(&bf_head,
2024 &txq->axq_q, lastbf->list.prev);
f078f209 2025
e8324357 2026 txq->axq_depth--;
29bffa96 2027 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
164ace38 2028 txq->axq_tx_inprogress = false;
0a8cea84
FF
2029 if (bf_held)
2030 list_del(&bf_held->list);
e8324357 2031 spin_unlock_bh(&txq->axq_lock);
f078f209 2032
0a8cea84
FF
2033 if (bf_held)
2034 ath_tx_return_buffer(sc, bf_held);
f078f209 2035
e8324357
S
2036 if (!bf_isampdu(bf)) {
2037 /*
2038 * This frame is sent out as a single frame.
2039 * Use hardware retry status for this frame.
2040 */
29bffa96 2041 if (ts.ts_status & ATH9K_TXERR_XRETRY)
e8324357 2042 bf->bf_state.bf_type |= BUF_XRETRY;
b572d033 2043 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
e8324357 2044 }
f078f209 2045
066dae93
FF
2046 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2047
e8324357 2048 if (bf_isampdu(bf))
c5992618
FF
2049 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2050 true);
e8324357 2051 else
29bffa96 2052 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
8469cdef 2053
066dae93
FF
2054 if (txq == sc->tx.txq_map[qnum])
2055 ath_wake_mac80211_queue(sc, qnum);
8469cdef 2056
059d806c 2057 spin_lock_bh(&txq->axq_lock);
e8324357
S
2058 if (sc->sc_flags & SC_OP_TXAGGR)
2059 ath_txq_schedule(sc, txq);
2060 spin_unlock_bh(&txq->axq_lock);
8469cdef
S
2061 }
2062}
2063
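/*
 * Descriptive note (inferred from the function body below): watchdog for
 * stuck tx queues. On each poll interval a non-empty queue is marked
 * in-progress; if the mark is still set on the next pass, no descriptor
 * completed in between and the chip is reset.
 */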
305fe47f 2064static void ath_tx_complete_poll_work(struct work_struct *work)
164ace38
SB
2065{
2066 struct ath_softc *sc = container_of(work, struct ath_softc,
2067 tx_complete_work.work);
2068 struct ath_txq *txq;
2069 int i;
2070 bool needreset = false;
2071
2072 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2073 if (ATH_TXQ_SETUP(sc, i)) {
2074 txq = &sc->tx.txq[i];
2075 spin_lock_bh(&txq->axq_lock);
2076 if (txq->axq_depth) {
2077 if (txq->axq_tx_inprogress) {
2078 needreset = true;
2079 spin_unlock_bh(&txq->axq_lock);
2080 break;
2081 } else {
2082 txq->axq_tx_inprogress = true;
2083 }
2084 }
2085 spin_unlock_bh(&txq->axq_lock);
2086 }
2087
2088 if (needreset) {
c46917bb
LR
2089 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2090 "tx hung, resetting the chip\n");
332c5566 2091 ath9k_ps_wakeup(sc);
fac6b6a0 2092 ath_reset(sc, true);
332c5566 2093 ath9k_ps_restore(sc);
164ace38
SB
2094 }
2095
42935eca 2096 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
164ace38
SB
2097 msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
2098}
2099
2100
f078f209 2101
e8324357 2102void ath_tx_tasklet(struct ath_softc *sc)
f078f209 2103{
e8324357
S
2104 int i;
2105 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
f078f209 2106
e8324357 2107 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
f078f209 2108
e8324357
S
2109 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2110 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2111 ath_tx_processq(sc, &sc->tx.txq[i]);
f078f209
LR
2112 }
2113}
2114
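/*
 * Descriptive note (inferred from the function body below): completion
 * handler for EDMA-capable hardware, where tx status arrives on a
 * dedicated status ring instead of being written back into the frame
 * descriptors, so completions are consumed here in ring order across
 * all queues.
 */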
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;
	int qnum;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
		}

		qnum = skb_get_queue_mapping(bf->bf_mpdu);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
					     txok, true);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		if (txq == sc->tx.txq_map[qnum])
			ath_wake_mac80211_queue(sc, qnum);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

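/*
 * Descriptive note (inferred from the function body below): the tx
 * status ring used by EDMA hardware is a single coherent DMA allocation;
 * its per-entry size (txs_len) is chip-specific.
 */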
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

e8324357 2234int ath_tx_init(struct ath_softc *sc, int nbufs)
f078f209 2235{
c46917bb 2236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
e8324357 2237 int error = 0;
f078f209 2238
797fe5cb 2239 spin_lock_init(&sc->tx.txbuflock);
f078f209 2240
797fe5cb 2241 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
4adfcded 2242 "tx", nbufs, 1, 1);
797fe5cb 2243 if (error != 0) {
c46917bb
LR
2244 ath_print(common, ATH_DBG_FATAL,
2245 "Failed to allocate tx descriptors: %d\n", error);
797fe5cb
S
2246 goto err;
2247 }
f078f209 2248
797fe5cb 2249 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
5088c2f1 2250 "beacon", ATH_BCBUF, 1, 1);
797fe5cb 2251 if (error != 0) {
c46917bb
LR
2252 ath_print(common, ATH_DBG_FATAL,
2253 "Failed to allocate beacon descriptors: %d\n", error);
797fe5cb
S
2254 goto err;
2255 }
f078f209 2256
164ace38
SB
2257 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2258
5088c2f1
VT
2259 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2260 error = ath_tx_edma_init(sc);
2261 if (error)
2262 goto err;
2263 }
2264
797fe5cb 2265err:
e8324357
S
2266 if (error != 0)
2267 ath_tx_cleanup(sc);
f078f209 2268
e8324357 2269 return error;
f078f209
LR
2270}
2271
797fe5cb 2272void ath_tx_cleanup(struct ath_softc *sc)
e8324357
S
2273{
2274 if (sc->beacon.bdma.dd_desc_len != 0)
2275 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2276
2277 if (sc->tx.txdma.dd_desc_len != 0)
2278 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
5088c2f1
VT
2279
2280 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2281 ath_tx_edma_cleanup(sc);
e8324357 2282}
f078f209
LR
2283
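/*
 * Descriptive note (inferred from the function body below): set up
 * per-station aggregation state - one ath_atx_tid per TID with an empty
 * block-ack window, each mapped to the tx queue of its access category.
 */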
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->txq = sc->tx.txq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}

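/*
 * Descriptive note (inferred from the function body below): tear down
 * per-station aggregation state under each queue's lock - unschedule the
 * TIDs and ACs, drain pending frames and clear the ADDBA state.
 */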
b5aa9bf9 2316void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209 2317{
2b40994c
FF
2318 struct ath_atx_ac *ac;
2319 struct ath_atx_tid *tid;
f078f209 2320 struct ath_txq *txq;
066dae93 2321 int tidno;
e8324357 2322
2b40994c
FF
2323 for (tidno = 0, tid = &an->tid[tidno];
2324 tidno < WME_NUM_TID; tidno++, tid++) {
f078f209 2325
2b40994c 2326 ac = tid->ac;
066dae93 2327 txq = ac->txq;
f078f209 2328
2b40994c
FF
2329 spin_lock_bh(&txq->axq_lock);
2330
2331 if (tid->sched) {
2332 list_del(&tid->list);
2333 tid->sched = false;
2334 }
2335
2336 if (ac->sched) {
2337 list_del(&ac->list);
2338 tid->ac->sched = false;
f078f209 2339 }
2b40994c
FF
2340
2341 ath_tid_drain(sc, txq, tid);
2342 tid->state &= ~AGGR_ADDBA_COMPLETE;
2343 tid->state &= ~AGGR_CLEANUP;
2344
2345 spin_unlock_bh(&txq->axq_lock);
f078f209
LR
2346 }
2347}