ath9k: fix crash in ath_update_survey_stats
drivers/net/wireless/ath/ath9k/xmit.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u16 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },     /*  0: BPSK */
	{    52,  108 },     /*  1: QPSK 1/2 */
	{    78,  162 },     /*  2: QPSK 3/4 */
	{   104,  216 },     /*  3: 16-QAM 1/2 */
	{   156,  324 },     /*  4: 16-QAM 3/4 */
	{   208,  432 },     /*  5: 64-QAM 2/3 */
	{   234,  486 },     /*  6: 64-QAM 3/4 */
	{   260,  540 },     /*  7: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

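/*
 * Illustrative sketch (not part of the original driver): how the macros
 * and rate table above combine into an airtime estimate.  The 1500-byte
 * MPDU length and the MCS 7 / 20 MHz / long-GI rate are made-up example
 * values; ath_pkt_duration() below performs the real computation.
 */
static inline u32 example_mpdu_airtime(void)
{
	u32 pktlen = 1500;                           /* hypothetical MPDU length */
	u32 nbits = (pktlen << 3) + OFDM_PLCP_BITS;  /* payload bits + PLCP */
	u32 nsymbits = bits_per_symbol[7][0];        /* MCS 7, 20 MHz: 260 bits/symbol */
	u32 nsymbols = (nbits + nsymbits - 1) / nsymbits;

	return SYMBOL_TIME(nsymbols);                /* 4 us per OFDM symbol */
}
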
static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);

enum {
	MCS_HT20,
	MCS_HT20_SGI,
	MCS_HT40,
	MCS_HT40_SGI,
};

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648,  12864,  19300,  25736,  28952,  32172,
		6424,  12852, 19280, 25708,  38568,  51424,  57852,  64280,
		9628,  19260, 28896, 38528,  57792,  65532,  65532,  65532,
		12828, 25656, 38488, 51320,  65532,  65532,  65532,  65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144,  10720, 14296,  21444,  28596,  32172,  35744,
		7140,  14284, 21428, 28568,  42856,  57144,  64288,  65532,
		10700, 21408, 32112, 42816,  64228,  65532,  65532,  65532,
		14256, 28516, 42780, 57040,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40] = {
		6680,  13360, 20044, 26724,  40092,  53456,  60140,  65532,
		13348, 26700, 40052, 53400,  65532,  65532,  65532,  65532,
		20004, 40008, 60016, 65532,  65532,  65532,  65532,  65532,
		26644, 53292, 65532, 65532,  65532,  65532,  65532,  65532,
	},
	[MCS_HT40_SGI] = {
		7420,  14844, 22272, 29696,  44544,  59396,  65532,  65532,
		14832, 29668, 44504, 59340,  65532,  65532,  65532,  65532,
		22232, 44464, 65532, 65532,  65532,  65532,  65532,  65532,
		29616, 59232, 65532, 65532,  65532,  65532,  65532,  65532,
	}
};

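/*
 * Illustrative sketch (not part of the original driver): selecting the
 * row of ath_max_4ms_framelen[] from mac80211 rate flags, mirroring the
 * lookup done in ath_lookup_rate() below.
 */
static inline int example_4ms_mode_index(struct ieee80211_tx_rate *rate)
{
	int modeidx = (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
		      MCS_HT40 : MCS_HT20;

	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		modeidx++;	/* the SGI row follows its LGI counterpart */

	return modeidx;
}
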
/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	WARN_ON(!tid->paused);

	spin_lock_bh(&txq->axq_lock);
	tid->paused = false;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	INIT_LIST_HEAD(&bf_head);

	memset(&ts, 0, sizeof(ts));
	spin_lock_bh(&txq->axq_lock);

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf)) {
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		} else {
			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
		}
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	__clear_bit(cindex, tid->tx_buf);

	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
	__set_bit(cindex, tid->tx_buf);

	if (index >= ((tid->baw_tail - tid->baw_head) &
	    (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
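
/*
 * Illustrative sketch (not part of the original driver): how a sequence
 * number maps to a slot in the circular block-ack window bitmap used by
 * the two helpers above.  ATH_BA_INDEX and ATH_TID_MAX_BUFS come from
 * the driver headers.
 */
static inline int example_baw_slot(u16 seq_start, u16 seqno, int baw_head)
{
	int index = ATH_BA_INDEX(seq_start, seqno);	/* offset from window start */

	return (baw_head + index) & (ATH_TID_MAX_BUFS - 1); /* wrap in the bitmap */
}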

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;
	struct ieee80211_tx_rate rates[4];
	int nframes;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	memcpy(rates, tx_info->control.rates, sizeof(rates));
	nframes = bf->bf_nframes;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
	if (!sta) {
		rcu_read_unlock();

		INIT_LIST_HEAD(&bf_head);
		while (bf) {
			bf_next = bf->bf_next;

			bf->bf_state.bf_type |= BUF_XRETRY;
			if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
			    !bf->bf_stale || bf_next != NULL)
				list_move_tail(&bf->list, &bf_head);

			ath_tx_rc_status(bf, ts, 1, 0, false);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    0, 0);

			bf = bf_next;
		}
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	/*
	 * The hardware occasionally sends a tx status for the wrong TID.
	 * In this case, the BA status cannot be considered valid and all
	 * subframes need to be retransmitted.
	 */
	if (bf->bf_tidno != ts->tid)
		txok = false;

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens.  The chip needs to be reset,
			 * but the AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable the reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending || (tid->state & AGGR_CLEANUP)) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				memcpy(tx_info->control.rates, rates, sizeof(rates));
				bf->bf_nframes = nframes;
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
					    !txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								  bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								 0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (tid->state & AGGR_CLEANUP) {
		ath_tx_flush_tid(sc, tid);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;
		}
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * The hardware can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero.  Ignore 65536, since we are constrained by the hardware.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, streams, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	streams = HT_RC_2_STREAMS(rix);
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
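
/*
 * Illustrative sketch (not part of the original driver): the density
 * conversion above, worked for a hypothetical 4 us MPDU density at
 * MCS 7, 20 MHz, long GI (one spatial stream).
 */
static inline u16 example_min_subframe_len(void)
{
	u32 nsymbols = NUM_SYMBOLS_PER_USEC(4);	/* 4 us -> 1 OFDM symbol */
	u32 nsymbits = bits_per_symbol[7][0] * HT_RC_2_STREAMS(7);

	return (nsymbols * nsymbits) / BITS_PER_BYTE; /* 260 / 8 = 32 bytes */
}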

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
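
/*
 * Illustrative sketch (not part of the original driver): the per-subframe
 * padding computed by the PADBYTES() helper above -- each subframe is
 * padded out to a 4-byte boundary, plus four bytes per MPDU delimiter.
 */
static inline u16 example_subframe_pad(u16 al_delta, int ndelim)
{
	return ((4 - (al_delta % 4)) % 4) + (ndelim << 2);
}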

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);

	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;

	txtid->state |= AGGR_ADDBA_PROGRESS;
	txtid->paused = true;
	*ssn = txtid->seq_start;

	return 0;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	spin_lock_bh(&txq->axq_lock);
	txtid->paused = true;

	/*
	 * If frames are still being transmitted for this TID, they will be
	 * cleaned up during tx completion. To prevent race conditions, this
	 * TID can only be reused after all in-progress subframes have been
	 * completed.
	 */
	if (txtid->baw_head != txtid->baw_tail)
		txtid->state |= AGGR_CLEANUP;
	else
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_flush_tid(sc, txtid);
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load, and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts, this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_class = subtype;
		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Failed to stop TX DMA. Resetting hardware!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4us v/s 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	streams = HT_RC_2_STREAMS(rix);
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix % 8][width] * streams;
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
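
/*
 * Illustrative sketch (not part of the original driver): calling
 * ath_pkt_duration() for a hypothetical two-stream MCS 15 frame at
 * 40 MHz with a short GI; 'sc' and 'bf' are assumed to be set up by
 * the caller.
 */
static inline u32 example_ht40_sgi_duration(struct ath_softc *sc,
					    struct ath_buf *bf)
{
	/* rix 15 -> 2 streams via HT_RC_2_STREAMS(); width 1 = 40 MHz */
	return ath_pkt_duration(sc, 15, bf, 1, 1, false);
}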

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
				series[i].RateFlags |= ATH9K_RATESERIES_STBC;
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize)
		bf->bf_frmlen -= padsize;

	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_state.bfs_paprd = txctl->paprd;
	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp = jiffies;
	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_tx_aborted = false;

	return 0;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	if (bf->bf_state.bfs_paprd)
		ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q, r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed, we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (!txq->stopped && txq->axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}
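
	/*
	 * Software queue depth accounting: pending_frames[] is incremented
	 * per hardware queue under axq_lock; once it crosses ATH_MAX_QDEPTH
	 * the matching mac80211 queue is stopped, and it is restarted by
	 * ath_wake_mac80211_queue() as completions drain the counter.
	 */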
	q = skb_get_queue_mapping(skb);
	if (q >= 4)
		q = 0;

	spin_lock_bh(&txq->axq_lock);
	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
		txq->stopped = 1;
	}
	spin_unlock_bh(&txq->axq_lock);

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}
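
	/*
	 * seq_ctrl keeps the fragment number in its low four bits
	 * (IEEE80211_SCTL_FRAG) and the sequence number in bits 4-15, so
	 * stepping seq_no by 0x10 advances the sequence number by one while
	 * the mask-and-or above preserves the fragment field.
	 */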

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}

/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int q, padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else {
		q = skb_get_queue_mapping(skb);
		if (q >= 4)
			q = 0;

		if (--sc->tx.pending_frames[q] < 0)
			sc->tx.pending_frames[q] = 0;

		ieee80211_tx_status(hw, skb);
	}
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
	bf->bf_buf_addr = 0;

	if (bf->bf_state.bfs_paprd) {
		if (time_after(jiffies,
			       bf->bf_state.bfs_paprd_timestamp +
			       msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
			dev_kfree_skb_any(skb);
		else
			complete(&sc->paprd_complete);
	} else {
		ath_debug_stat_tx(sc, txq, bf, ts);
		ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	}
	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
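
/*
 * ath_tx_num_badfrms() walks the subframes of an aggregate and counts the
 * ones the receiver did not acknowledge. For an A-MPDU the hardware hands
 * back the starting sequence number and a block-ack bitmap; e.g. (as an
 * illustration) with seq_st = 100, a subframe with bf_seqno = 103 maps to
 * bit ATH_BA_INDEX(100, 103) = 3, and a clear bit means that subframe must
 * be retried.
 */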
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_lastbf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

		BUG_ON(nbad > bf->bf_nframes);

		tx_info->status.ampdu_len = bf->bf_nframes;
		tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
	}

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
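
/*
 * The rates[] fixup above reports to mac80211 rate control exactly what the
 * hardware attempted: entries past the final rate are invalidated, the final
 * rate carries its attempt count (ts_longretry + 1), and for A-MPDUs
 * ampdu_len/ampdu_ack_len give per-aggregate delivery results.
 */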
static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
	if (qnum == -1)
		return;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
		if (ath_mac80211_start_queue(sc, qnum))
			txq->stopped = 0;
	}
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition in which a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}
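
/*
 * Watchdog handshake with ath_tx_processq(): the poll worker below sets
 * axq_tx_inprogress on a non-empty queue, and completion processing in
 * ath_tx_processq() clears it again. If the flag is still set one poll
 * interval later, no TX completion fired in that window, so the chip is
 * assumed hung and is reset.
 */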
static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

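/*
 * EDMA (AR9003-family) hardware reports TX completions through a separate
 * status ring consumed below, rather than through status words embedded in
 * each frame descriptor as in ath_tx_processq() above.
 */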
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/

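/*
 * The EDMA TX status ring is a DMA-coherent buffer of ATH_TXSTATUS_RING_SIZE
 * entries, each caps.txs_len bytes, that the hardware fills and
 * ath9k_hw_txprocdesc() reads from in ath_tx_edma_tasklet() above.
 */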
static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		ac->qnum = sc->tx.hwq_map[acno];
		INIT_LIST_HEAD(&ac->tid_q);
	}
}
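
/*
 * When a station goes away each of its TIDs must be unhooked from the txq
 * scheduler and its pending buffers drained under the per-queue lock, so no
 * stale ath_atx_tid/ath_atx_ac pointers remain on any queue lists.
 */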
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;
	struct ath_txq *txq;
	int i, tidno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID; tidno++, tid++) {
		i = tid->ac->qnum;

		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		txq = &sc->tx.txq[i];
		ac = tid->ac;

		spin_lock_bh(&txq->axq_lock);

		if (tid->sched) {
			list_del(&tid->list);
			tid->sched = false;
		}

		if (ac->sched) {
			list_del(&ac->list);
			tid->ac->sched = false;
		}

		ath_tid_drain(sc, txq, tid);
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_CLEANUP;

		spin_unlock_bh(&txq->axq_lock);
	}
}