ath9k: Use GFP_ATOMIC when allocating TX private area
[deliverable/linux.git] drivers/net/wireless/ath9k/xmit.c
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

static u32 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
        {    26,   54 },     /*  0: BPSK */
        {    52,  108 },     /*  1: QPSK 1/2 */
        {    78,  162 },     /*  2: QPSK 3/4 */
        {   104,  216 },     /*  3: 16-QAM 1/2 */
        {   156,  324 },     /*  4: 16-QAM 3/4 */
        {   208,  432 },     /*  5: 64-QAM 2/3 */
        {   234,  486 },     /*  6: 64-QAM 3/4 */
        {   260,  540 },     /*  7: 64-QAM 5/6 */
        {    52,  108 },     /*  8: BPSK */
        {   104,  216 },     /*  9: QPSK 1/2 */
        {   156,  324 },     /* 10: QPSK 3/4 */
        {   208,  432 },     /* 11: 16-QAM 1/2 */
        {   312,  648 },     /* 12: 16-QAM 3/4 */
        {   416,  864 },     /* 13: 64-QAM 2/3 */
        {   468,  972 },     /* 14: 64-QAM 3/4 */
        {   520, 1080 },     /* 15: 64-QAM 5/6 */
};
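
/*
 * bits_per_symbol[] is indexed by HT MCS (0-15); column 0 is the number
 * of data bits carried per OFDM symbol at 20 MHz, column 1 at 40 MHz.
 * Rows 8-15 are the two-stream MCS values and carry exactly twice the
 * bits of the corresponding single-stream row.
 */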

#define IS_HT_RATE(_rate)       ((_rate) & 0x80)

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        list_splice_tail_init(head, &txq->axq_q);
        txq->axq_depth++;
        txq->axq_totalqueued++;
        txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

        DPRINTF(sc, ATH_DBG_QUEUE,
                "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        if (txq->axq_link == NULL) {
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                DPRINTF(sc, ATH_DBG_XMIT,
                        "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        } else {
                *txq->axq_link = bf->bf_daddr;
                DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
                        txq->axq_qnum, txq->axq_link,
                        ito64(bf->bf_daddr), bf->bf_desc);
        }
        txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
        ath9k_hw_txstart(ah, txq->axq_qnum);
}
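
/*
 * Note on ath_tx_txqaddbuf(): if the hardware queue was idle
 * (axq_link == NULL), the new chain is handed over by programming TXDP
 * directly; otherwise the ds_link word of the previously queued chain is
 * patched to point at it before DMA is (re)started with txstart. See the
 * holding-descriptor comment in ath_tx_processq() for the race this
 * creates.
 */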

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            struct ath_xmit_status *tx_status)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

        DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
            tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
                kfree(tx_info_priv);
                tx_info->rate_driver_data[0] = NULL;
        }

        if (tx_status->flags & ATH_TX_BAR) {
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
                tx_status->flags &= ~ATH_TX_BAR;
        }

        if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
                /* Frame was ACKed */
                tx_info->flags |= IEEE80211_TX_STAT_ACK;
        }

        tx_info->status.rates[0].count = tx_status->retries + 1;

        ieee80211_tx_status(hw, skb);
}

/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
        struct ath_atx_tid *tid;
        tid = ATH_AN_2_TID(an, tidno);

        if (tid->state & AGGR_ADDBA_COMPLETE ||
            tid->state & AGGR_ADDBA_PROGRESS)
                return 1;
        else
                return 0;
}

static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
                                 struct ath_beacon_config *conf)
{
        struct ieee80211_hw *hw = sc->hw;

        /* fill in beacon config data */

        conf->beacon_interval = hw->conf.beacon_int;
        conf->listen_interval = 100;
        conf->dtim_count = 1;
        conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}

/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        enum ath9k_pkt_type htype;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_beacon(fc))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if (ieee80211_is_probe_resp(fc))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else if (ieee80211_is_atim(fc))
                htype = ATH9K_PKT_TYPE_ATIM;
        else if (ieee80211_is_pspoll(fc))
                htype = ATH9K_PKT_TYPE_PSPOLL;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static bool is_pae(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        __le16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        if (ieee80211_is_data(fc)) {
                if (ieee80211_is_nullfunc(fc) ||
                    /* Port Access Entity (IEEE 802.1X) */
                    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
                        return true;
                }
        }

        return false;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->control.hw_key) {
                if (tx_info->control.hw_key->alg == ALG_WEP)
                        return ATH9K_KEY_TYPE_WEP;
                else if (tx_info->control.hw_key->alg == ALG_TKIP)
                        return ATH9K_KEY_TYPE_TKIP;
                else if (tx_info->control.hw_key->alg == ALG_CCMP)
                        return ATH9K_KEY_TYPE_AES;
        }

        return ATH9K_KEY_TYPE_CLEAR;
}

/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
                                  struct ath_buf *bf)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr;
        struct ath_node *an;
        struct ath_atx_tid *tid;
        __le16 fc;
        u8 *qc;

        if (!tx_info->control.sta)
                return;

        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;

        /* Get tidno */

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                bf->bf_tidno = qc[0] & 0xf;
        }

        /* Get seqno */

        if (ieee80211_is_data(fc) && !is_pae(skb)) {
                /* For HT capable stations, we save tidno for later use.
                 * We also override seqno set by upper layer with the one
                 * in tx aggregation state.
                 *
                 * If fragmentation is on, the sequence number is
                 * not overridden, since it has been
                 * incremented by the fragmentation routine.
                 *
                 * FIXME: check if the fragmentation threshold exceeds
                 * IEEE80211 max.
                 */
                tid = ATH_AN_2_TID(an, bf->bf_tidno);
                hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
                                            IEEE80211_SEQ_SEQ_SHIFT);
                bf->bf_seqno = tid->seq_next;
                INCR(tid->seq_next, IEEE80211_SEQ_MAX);
        }
}
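
/*
 * Note on assign_aggr_tid_seqno(): tid->seq_next is the driver's own
 * per-TID sequence counter. It is stamped into the 802.11 header so that
 * the sequence space stays consistent with the TID's block-ack window,
 * and then advanced modulo IEEE80211_SEQ_MAX by INCR().
 */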

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
                          struct ath_txq *txq)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        int flags = 0;

        flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
        flags |= ATH9K_TXDESC_INTREQ;

        if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= ATH9K_TXDESC_NOACK;
        if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                flags |= ATH9K_TXDESC_RTSENA;

        return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        spin_lock_bh(&sc->sc_txbuflock);

        if (unlikely(list_empty(&sc->sc_txbuf))) {
                spin_unlock_bh(&sc->sc_txbuflock);
                return NULL;
        }

        bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
        list_del(&bf->list);

        spin_unlock_bh(&sc->sc_txbuflock);

        return bf;
}

/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
                                struct ath_buf *bf,
                                struct list_head *bf_q,
                                int txok, int sendbar)
{
        struct sk_buff *skb = bf->bf_mpdu;
        struct ath_xmit_status tx_status;

        /*
         * Set retry information.
         * NB: Don't use the information in the descriptor, because the frame
         * could be software retried.
         */
        tx_status.retries = bf->bf_retries;
        tx_status.flags = 0;

        if (sendbar)
                tx_status.flags = ATH_TX_BAR;

        if (!txok) {
                tx_status.flags |= ATH_TX_ERROR;

                if (bf_isxretried(bf))
                        tx_status.flags |= ATH_TX_XRETRY;
        }

        /* Unmap this frame */
        pci_unmap_single(sc->pdev,
                         bf->bf_dmacontext,
                         skb->len,
                         PCI_DMA_TODEVICE);
        /* complete this frame */
        ath_tx_complete(sc, skb, &tx_status);

        /*
         * Return the list of ath_buf of this mpdu to free queue
         */
        spin_lock_bh(&sc->sc_txbuflock);
        list_splice_tail_init(bf_q, &sc->sc_txbuf);
        spin_unlock_bh(&sc->sc_txbuflock);
}

/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        /*
         * if tid is paused, hold off
         */
        if (tid->paused)
                return;

        /*
         * add tid to ac at most once
         */
        if (tid->sched)
                return;

        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);

        /*
         * add node ac to txq at most once
         */
        if (ac->sched)
                return;

        ac->sched = true;
        list_add_tail(&ac->list, &txq->axq_acq);
}

/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

        spin_lock_bh(&txq->axq_lock);

        tid->paused++;

        spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

        ASSERT(tid->paused > 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0)
                goto unlock;

        if (list_empty(&tid->buf_q))
                goto unlock;

        /*
         * Add this TID to scheduler and try to send out aggregates
         */
        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
unlock:
        spin_unlock_bh(&txq->axq_lock);
}

/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              int txok)
{
        struct ath_buf *bf_last = bf->bf_lastbf;
        struct ath_desc *ds = bf_last->bf_desc;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int ba_index;
        int nbad = 0;
        int isaggr = 0;

        if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
                return 0;

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                seq_st = ATH_DS_BA_SEQ(ds);
                memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
        }

        while (bf) {
                ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
                if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
                        nbad++;

                bf = bf->bf_next;
        }

        return nbad;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;

        bf->bf_state.bf_type |= BUF_RETRY;
        bf->bf_retries++;

        skb = bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
        int index, cindex;

        index = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        tid->tx_buf[cindex] = NULL;

        while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);
                INCR(tid->baw_head, ATH_TID_MAX_BUFS);
        }
}
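
/*
 * Note on ath_tx_update_baw(): tx_buf[] is a circular window of
 * ATH_TID_MAX_BUFS slots tracking outstanding subframes. Completing a
 * subframe clears its slot; seq_start/baw_head then slide forward over
 * any leading completed slots. For example, with frames 10, 11 and 12
 * outstanding, completing 11 first moves nothing (the head slot for 10
 * is still busy), while completing 10 afterwards advances seq_start past
 * both 10 and 11, stopping at the still-pending 12.
 */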

/*
 * ath_pkt_dur - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs 3.6 us for symbol time
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
                            int width, int half_gi, bool shortPreamble)
{
        struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
        u32 nbits, nsymbits, duration, nsymbols;
        u8 rc;
        int streams, pktlen;

        pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
        rc = rate_table->info[rix].ratecode;

        /* for legacy rates, use old function to compute packet duration */
        if (!IS_HT_RATE(rc))
                return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
                                              rix, shortPreamble);

        /* find number of symbols: PLCP + data */
        nbits = (pktlen << 3) + OFDM_PLCP_BITS;
        nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
        nsymbols = (nbits + nsymbits - 1) / nsymbits;

        if (!half_gi)
                duration = SYMBOL_TIME(nsymbols);
        else
                duration = SYMBOL_TIME_HALFGI(nsymbols);

        /* add up duration for legacy/ht training and signal fields */
        streams = HT_RC_2_STREAMS(rc);
        duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

        return duration;
}
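
/*
 * Worked example for ath_pkt_duration() (illustrative only): a 1500-byte
 * MPDU at single-stream MCS 4 (16-QAM 3/4), 20 MHz, full GI:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbits = bits_per_symbol[4][0] = 156
 *   nsymbols = ceil(12022 / 156) = 78
 *   duration = SYMBOL_TIME(78) = 312 us, plus the 36 us of training and
 *   signal fields (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)).
 */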

/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_rate_table *rt;
        struct ath_desc *ds = bf->bf_desc;
        struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
        struct ath9k_11n_rate_series series[4];
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        struct ieee80211_hdr *hdr;
        int i, flags, rtsctsena = 0;
        u32 ctsduration = 0;
        u8 rix = 0, cix, ctsrate = 0;
        __le16 fc;

        memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

        skb = (struct sk_buff *)bf->bf_mpdu;
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;

        if (ieee80211_has_morefrags(fc) ||
            (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
                rates[1].count = rates[2].count = rates[3].count = 0;
                rates[1].idx = rates[2].idx = rates[3].idx = 0;
                rates[0].count = ATH_TXMAXTRY;
        }

        /* get the cix for the lowest valid rix */
        rt = sc->hw_rate_table[sc->sc_curmode];
        for (i = 3; i >= 0; i--) {
                if (rates[i].count && (rates[i].idx >= 0)) {
                        rix = rates[i].idx;
                        break;
                }
        }

        flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
        cix = rt->info[rix].ctrl_rate;

        /*
         * If 802.11g protection is enabled, determine whether to use RTS/CTS or
         * just CTS. Note that this is only done for OFDM/HT unicast frames.
         */
        if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
            && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
                WLAN_RC_PHY_HT(rt->info[rix].phy))) {
                if (sc->sc_protmode == PROT_M_RTSCTS)
                        flags = ATH9K_TXDESC_RTSENA;
                else if (sc->sc_protmode == PROT_M_CTSONLY)
                        flags = ATH9K_TXDESC_CTSENA;

                cix = rt->info[sc->sc_protrix].ctrl_rate;
                rtsctsena = 1;
        }

        /* For 11n, the default behavior is to enable RTS for hw retried frames.
         * We enable the global flag here and let rate series flags determine
         * which rates will actually use RTS.
         */
        if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
                /* 802.11g protection not needed, use our default behavior */
                if (!rtsctsena)
                        flags = ATH9K_TXDESC_RTSENA;
        }

        /* Set protection if aggregate protection on */
        if (sc->sc_config.ath_aggr_prot &&
            (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
                flags = ATH9K_TXDESC_RTSENA;
                cix = rt->info[sc->sc_protrix].ctrl_rate;
                rtsctsena = 1;
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
                flags &= ~(ATH9K_TXDESC_RTSENA);

        /*
         * CTS transmit rate is derived from the transmit rate by looking in the
         * h/w rate table. We must also factor in whether or not a short
         * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
         */
        ctsrate = rt->info[cix].ratecode |
                (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

        for (i = 0; i < 4; i++) {
                if (!rates[i].count || (rates[i].idx < 0))
                        continue;

                rix = rates[i].idx;

                series[i].Rate = rt->info[rix].ratecode |
                        (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

                series[i].Tries = rates[i].count;

                series[i].RateFlags = (
                        (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
                                ATH9K_RATESERIES_RTS_CTS : 0) |
                        ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
                                ATH9K_RATESERIES_2040 : 0) |
                        ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
                                ATH9K_RATESERIES_HALFGI : 0);

                series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
                        (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
                        (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
                        bf_isshpreamble(bf));

                series[i].ChSel = sc->sc_tx_chainmask;

                if (rtsctsena)
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
        }

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
                                     ctsrate, ctsduration,
                                     series, 4, flags);

        if (sc->sc_config.ath_aggr_prot && flags)
                ath9k_hw_set11n_burstduration(ah, ds, 8192);
}

/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
                              struct ath_txq *txq,
                              struct ath_atx_tid *tid,
                              struct list_head *bf_head)
{
        struct ath_buf *bf;

        BUG_ON(list_empty(bf_head));

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

        /* update starting sequence number for subsequent ADDBA request */
        INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        /* Queue to h/w without aggregation */
        bf->bf_nframes = 1;
        bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txq, bf_head);

        return 0;
}

/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
        struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
        struct ath_buf *bf;
        struct list_head bf_head;
        INIT_LIST_HEAD(&bf_head);

        ASSERT(tid->paused > 0);
        spin_lock_bh(&txq->axq_lock);

        tid->paused--;

        if (tid->paused > 0) {
                spin_unlock_bh(&txq->axq_lock);
                return;
        }

        while (!list_empty(&tid->buf_q)) {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
                ASSERT(!bf_isretried(bf));
                list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
                ath_tx_send_normal(sc, txq, tid, &bf_head);
        }

        spin_unlock_bh(&txq->axq_lock);
}

/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                                      struct ath_txq *txq,
                                      struct ath_buf *bf,
                                      struct list_head *bf_q,
                                      int txok)
{
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_last = bf->bf_lastbf;
        struct ath_desc *ds = bf_last->bf_desc;
        struct ath_buf *bf_next, *bf_lastq = NULL;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

        skb = (struct sk_buff *)bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->control.sta) {
                an = (struct ath_node *)tx_info->control.sta->drv_priv;
                tid = ATH_AN_2_TID(an, bf->bf_tidno);
        }

        isaggr = bf_isaggr(bf);
        if (isaggr) {
                if (txok) {
                        if (ATH_DS_TX_BA(ds)) {
                                /*
                                 * extract starting sequence and
                                 * block-ack bitmap
                                 */
                                seq_st = ATH_DS_BA_SEQ(ds);
                                memcpy(ba,
                                       ATH_DS_BA_BITMAP(ds),
                                       WME_BA_BMP_SIZE >> 3);
                        } else {
                                memset(ba, 0, WME_BA_BMP_SIZE >> 3);

                                /*
                                 * AR5416 can become deaf/mute when BA
                                 * issue happens. Chip needs to be reset.
                                 * But AP code may have synchronization issues
                                 * when performing an internal reset in this
                                 * routine. Only enable reset in STA mode
                                 * for now.
                                 */
                                if (sc->sc_ah->ah_opmode ==
                                    NL80211_IFTYPE_STATION)
                                        needreset = 1;
                        }
                } else {
                        memset(ba, 0, WME_BA_BMP_SIZE >> 3);
                }
        }

        INIT_LIST_HEAD(&bf_pending);
        INIT_LIST_HEAD(&bf_head);

        while (bf) {
                txfail = txpending = 0;
                bf_next = bf->bf_next;

                if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
                        /* transmit completion, subframe is
                         * acked by block ack */
                } else if (!isaggr && txok) {
                        /* transmit completion */
                } else {

                        if (!(tid->state & AGGR_CLEANUP) &&
                            ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, bf);
                                        txpending = 1;
                                } else {
                                        bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                }
                        } else {
                                /*
                                 * cleanup in progress, just fail
                                 * the un-acked sub-frames
                                 */
                                txfail = 1;
                        }
                }
                /*
                 * Remove ath_buf's of this sub-frame from aggregate queue.
                 */
                if (bf_next == NULL) { /* last subframe in the aggregate */
                        ASSERT(bf->bf_lastfrm == bf_last);

                        /*
                         * The last descriptor of the last sub frame could be
                         * a holding descriptor for h/w. If that's the case,
                         * bf->bf_lastfrm won't be in the bf_q.
                         * Make sure we handle bf_q properly here.
                         */

                        if (!list_empty(bf_q)) {
                                bf_lastq = list_entry(bf_q->prev,
                                                      struct ath_buf, list);
                                list_cut_position(&bf_head,
                                                  bf_q, &bf_lastq->list);
                        } else {
                                /*
                                 * XXX: if the last subframe only has one
                                 * descriptor, which is also being used as
                                 * a holding descriptor, then the ath_buf
                                 * is not in the bf_q at all.
                                 */
                                INIT_LIST_HEAD(&bf_head);
                        }
                } else {
                        ASSERT(!list_empty(bf_q));
                        list_cut_position(&bf_head,
                                          bf_q, &bf->bf_lastfrm->list);
                }

                if (!txpending) {
                        /*
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
                        spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
                        spin_unlock_bh(&txq->axq_lock);

                        /* complete this sub-frame */
                        ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
                } else {
                        /*
                         * retry the un-acked ones
                         */
                        /*
                         * XXX: if the last descriptor is a holding descriptor,
                         * in order to requeue the frame to software queue, we
                         * need to allocate a new descriptor and
                         * copy the content of holding descriptor to it.
                         */
                        if (bf->bf_next == NULL &&
                            bf_last->bf_status & ATH_BUFSTATUS_STALE) {
                                struct ath_buf *tbf;

                                /* allocate new descriptor */
                                spin_lock_bh(&sc->sc_txbuflock);
                                ASSERT(!list_empty((&sc->sc_txbuf)));
                                tbf = list_first_entry(&sc->sc_txbuf,
                                                       struct ath_buf, list);
                                list_del(&tbf->list);
                                spin_unlock_bh(&sc->sc_txbuflock);

                                ATH_TXBUF_RESET(tbf);

                                /* copy descriptor content */
                                tbf->bf_mpdu = bf_last->bf_mpdu;
                                tbf->bf_buf_addr = bf_last->bf_buf_addr;
                                *(tbf->bf_desc) = *(bf_last->bf_desc);

                                /* link it to the frame */
                                if (bf_lastq) {
                                        bf_lastq->bf_desc->ds_link =
                                                tbf->bf_daddr;
                                        bf->bf_lastfrm = tbf;
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                bf->bf_lastfrm->bf_desc);
                                } else {
                                        tbf->bf_state = bf_last->bf_state;
                                        tbf->bf_lastfrm = tbf;
                                        ath9k_hw_cleartxdesc(sc->sc_ah,
                                                tbf->bf_lastfrm->bf_desc);

                                        /* copy the DMA context */
                                        tbf->bf_dmacontext =
                                                bf_last->bf_dmacontext;
                                }
                                list_add_tail(&tbf->list, &bf_head);
                        } else {
                                /*
                                 * Clear descriptor status words for
                                 * software retry
                                 */
                                ath9k_hw_cleartxdesc(sc->sc_ah,
                                                     bf->bf_lastfrm->bf_desc);
                        }

                        /*
                         * Put this buffer to the temporary pending
                         * queue to retain ordering
                         */
                        list_splice_tail_init(&bf_head, &bf_pending);
                }

                bf = bf_next;
        }

        if (tid->state & AGGR_CLEANUP) {
                /* check to see if we're done with cleaning the h/w queue */
                spin_lock_bh(&txq->axq_lock);

                if (tid->baw_head == tid->baw_tail) {
                        tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->addba_exchangeattempts = 0;
                        spin_unlock_bh(&txq->axq_lock);

                        tid->state &= ~AGGR_CLEANUP;

                        /* send buffered frames as singles */
                        ath_tx_flush_tid(sc, tid);
                } else
                        spin_unlock_bh(&txq->axq_lock);

                return;
        }

        /*
         * prepend un-acked frames to the beginning of the pending frame queue
         */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
                /* Note: we _prepend_, we do _not_ append to
                 * the end of the queue! */
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }

        if (needreset)
                ath_reset(sc, false);

        return;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
        struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

        tx_info_priv->update_rc = false;
        if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

        if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
            (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
                if (bf_isdata(bf)) {
                        memcpy(&tx_info_priv->tx, &ds->ds_txstat,
                               sizeof(tx_info_priv->tx));
                        tx_info_priv->n_frames = bf->bf_nframes;
                        tx_info_priv->n_bad_frames = nbad;
                        tx_info_priv->update_rc = true;
                }
        }
}

/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        int txok, nbad = 0;
        int status;

        DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                txq->axq_link);

        for (;;) {
                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        txq->axq_linkbuf = NULL;
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                /*
                 * There is a race condition that a BH gets scheduled
                 * after sw writes TxE and before hw re-loads the last
                 * descriptor to get the newly chained one.
                 * Software must keep the last DONE descriptor as a
                 * holding descriptor - software does so by marking
                 * it with the STALE flag.
                 */
                bf_held = NULL;
                if (bf->bf_status & ATH_BUFSTATUS_STALE) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q)) {
                                /* FIXME:
                                 * The holding descriptor is the last
                                 * descriptor in queue. It's safe to remove
                                 * the last holding descriptor in BH context.
                                 */
                                spin_unlock_bh(&txq->axq_lock);
                                break;
                        } else {
                                /* Lets work with the next buffer now */
                                bf = list_entry(bf_held->list.next,
                                                struct ath_buf, list);
                        }
                }

                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc; /* NB: last descriptor */

                status = ath9k_hw_txprocdesc(ah, ds);
                if (status == -EINPROGRESS) {
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
                if (bf->bf_desc == txq->axq_lastdsWithCTS)
                        txq->axq_lastdsWithCTS = NULL;
                if (ds == txq->axq_gatingds)
                        txq->axq_gatingds = NULL;

                /*
                 * Remove ath_buf's of the same transmit unit from txq,
                 * however leave the last descriptor back as the holding
                 * descriptor for hw.
                 */
                lastbf->bf_status |= ATH_BUFSTATUS_STALE;
                INIT_LIST_HEAD(&bf_head);

                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
                                          &txq->axq_q, lastbf->list.prev);

                txq->axq_depth--;

                if (bf_isaggr(bf))
                        txq->axq_aggr_depth--;

                txok = (ds->ds_txstat.ts_status == 0);

                spin_unlock_bh(&txq->axq_lock);

                if (bf_held) {
                        list_del(&bf_held->list);
                        spin_lock_bh(&sc->sc_txbuflock);
                        list_add_tail(&bf_held->list, &sc->sc_txbuf);
                        spin_unlock_bh(&sc->sc_txbuflock);
                }

                if (!bf_isampdu(bf)) {
                        /*
                         * This frame is sent out as a single frame.
                         * Use hardware retry status for this frame.
                         */
                        bf->bf_retries = ds->ds_txstat.ts_longretry;
                        if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
                        nbad = 0;
                } else {
                        nbad = ath_tx_num_badfrms(sc, bf, txok);
                }

                ath_tx_rc_status(bf, ds, nbad);

                /*
                 * Complete this transmit unit
                 */
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
                else
                        ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

                /* Wake up mac80211 queue */

                spin_lock_bh(&txq->axq_lock);
                if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
                    (ATH_TXBUF - 20)) {
                        int qnum;
                        qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
                        if (qnum != -1) {
                                ieee80211_wake_queue(sc->hw, qnum);
                                txq->stopped = 0;
                        }
                }

                /*
                 * schedule any pending packets if aggregation is enabled
                 */
                if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
}

static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hal *ah = sc->sc_ah;

        (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
        DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
                txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hal *ah = sc->sc_ah;
        int i, status, npend = 0;

        if (!(sc->sc_flags & SC_OP_INVALID)) {
                for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i)) {
                                ath_tx_stopdma(sc, &sc->sc_txq[i]);
                                /* The TxDMA may not really be stopped.
                                 * Double check the hal tx pending count */
                                npend += ath9k_hw_numtxpending(ah,
                                                sc->sc_txq[i].axq_qnum);
                        }
                }
        }

        if (npend) {
                /* TxDMA not stopped, reset the hal */
                DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");

                spin_lock_bh(&sc->sc_resetlock);
                if (!ath9k_hw_reset(ah,
                                    sc->sc_ah->ah_curchan,
                                    sc->tx_chan_width,
                                    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                                    sc->sc_ht_extprotspacing, true, &status)) {

                        DPRINTF(sc, ATH_DBG_FATAL,
                                "Unable to reset hardware; hal status %u\n",
                                status);
                }
                spin_unlock_bh(&sc->sc_resetlock);
        }

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
        }
}

/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
                             struct ath_atx_tid *tid,
                             struct ath_buf *bf)
{
        int index, cindex;

        if (bf_isretried(bf))
                return;

        index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        ASSERT(tid->tx_buf[cindex] == NULL);
        tid->tx_buf[cindex] = bf;

        if (index >= ((tid->baw_tail - tid->baw_head) &
                      (ATH_TID_MAX_BUFS - 1))) {
                tid->baw_tail = cindex;
                INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
        }
}

/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */
static int ath_tx_send_ampdu(struct ath_softc *sc,
                             struct ath_atx_tid *tid,
                             struct list_head *bf_head,
                             struct ath_tx_control *txctl)
{
        struct ath_buf *bf;

        BUG_ON(list_empty(bf_head));

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type |= BUF_AMPDU;

        /*
         * Do not queue to h/w when any of the following conditions is true:
         * - there are pending frames in software queue
         * - the TID is currently paused for ADDBA/BAR request
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
        if (!list_empty(&tid->buf_q) || tid->paused ||
            !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
            txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
                list_splice_tail_init(bf_head, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return 0;
        }

        /* Add sub-frame to BAW */
        ath_tx_addto_baw(sc, tid, bf);

        /* Queue to h/w without aggregation */
        bf->bf_nframes = 1;
        bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

        return 0;
}

/*
 * looks up the rate
 * returns aggr limit based on lowest of the rates
 */
static u32 ath_lookup_rate(struct ath_softc *sc,
                           struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
        struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
        struct ath_tx_info_priv *tx_info_priv;
        u32 max_4ms_framelen, frame_length;
        u16 aggr_limit, legacy = 0, maxampdu;
        int i;

        skb = (struct sk_buff *)bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
        rates = tx_info->control.rates;
        tx_info_priv =
                (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

        /*
         * Find the lowest frame length among the rate series that will have a
         * 4ms transmit duration.
         * TODO - TXOP limit needs to be considered.
         */
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

        for (i = 0; i < 4; i++) {
                if (rates[i].count) {
                        if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
                                legacy = 1;
                                break;
                        }

                        frame_length =
                                rate_table->info[rates[i].idx].max_4ms_framelen;
                        max_4ms_framelen = min(max_4ms_framelen, frame_length);
                }
        }

        /*
         * limit aggregate size by the minimum rate if rate selected is
         * not a probe rate, if rate selected is a probe rate then
         * avoid aggregation of this packet.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;

        aggr_limit = min(max_4ms_framelen,
                         (u32)ATH_AMPDU_LIMIT_DEFAULT);

        /*
         * h/w can accept aggregates up to 16 bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
        maxampdu = tid->an->maxampdu;
        if (maxampdu)
                aggr_limit = min(aggr_limit, maxampdu);

        return aggr_limit;
}
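
/*
 * Note on ath_lookup_rate(): the returned limit is the smallest of three
 * caps - the 4 ms airtime limit of the lowest rate in the series,
 * ATH_AMPDU_LIMIT_DEFAULT, and the peer's advertised maximum A-MPDU
 * length (tid->an->maxampdu). A return of 0 disables aggregation for
 * this frame, which is used for legacy rates and rate-control probes.
 */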

/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * caller should make sure that the rate is an HT rate.
 */
static int ath_compute_num_delims(struct ath_softc *sc,
                                  struct ath_atx_tid *tid,
                                  struct ath_buf *bf,
                                  u16 frmlen)
{
        struct ath_rate_table *rt = sc->hw_rate_table[sc->sc_curmode];
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols, mpdudensity;
        u16 minlen;
        u8 rc, flags, rix;
        int width, half_gi, ndelim, mindelim;

        /* Select standard number of delimiters based on frame length alone */
        ndelim = ATH_AGGR_GET_NDELIM(frmlen);

        /*
         * If encryption enabled, hardware requires some more padding between
         * subframes.
         * TODO - this could be improved to be dependent on the rate.
         *        The hardware can keep up at lower rates, but not higher rates
         */
        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
                ndelim += ATH_AGGR_ENCRYPTDELIM;

        /*
         * Convert desired mpdu density from microseconds to bytes based
         * on highest rate in rate series (i.e. first rate) to determine
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40 MHz and half or full GI.
         */
        mpdudensity = tid->an->mpdudensity;

        /*
         * If there is no mpdu density restriction, no further calculation
         * is needed.
         */
        if (mpdudensity == 0)
                return ndelim;

        rix = tx_info->control.rates[0].idx;
        flags = tx_info->control.rates[0].flags;
        rc = rt->info[rix].ratecode;
        width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
        half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
        else
                nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

        if (nsymbols == 0)
                nsymbols = 1;

        nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

        /* Is frame shorter than required minimum length? */
        if (frmlen < minlen) {
                /* Get the minimum number of delimiters required. */
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}
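
/*
 * Worked example for ath_compute_num_delims() (illustrative; assumes
 * ATH_AGGR_DELIM_SZ is the 4-byte MPDU delimiter): for an 8 us MPDU
 * density at MCS 7, 20 MHz, full GI:
 *   nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2
 *   nsymbits = bits_per_symbol[7][0] = 260
 *   minlen   = 2 * 260 / 8 = 65 bytes
 * so a 41-byte subframe would need (65 - 41) / 4 = 6 delimiters if that
 * exceeds the frame-length/encryption baseline.
 */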

/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             struct ath_buf **bf_last,
                                             struct aggr_rifs_param *param,
                                             int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
        struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
        struct list_head bf_head;
        int rl = 0, nframes = 0, ndelim;
        u16 aggr_limit = 0, al = 0, bpad = 0,
            al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
        int prev_al = 0;
        INIT_LIST_HEAD(&bf_head);

        BUG_ON(list_empty(&tid->buf_q));

        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

                /*
                 * do not step over block-ack window
                 */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }

                if (!rl) {
                        aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
                }

                /*
                 * do not exceed aggregation limit
                 */
                al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

                if (nframes && (aggr_limit <
                                (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /*
                 * do not exceed subframe limit
                 */
                if ((nframes + *prev_frames) >=
                    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }

                /*
                 * add padding for previous frame to aggregation length
                 */
                al += bpad + al_delta;

                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
                ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

                bpad = PADBYTES(al_delta) + (ndelim << 2);

                bf->bf_next = NULL;
                bf->bf_lastfrm->bf_desc->ds_link = 0;

                /*
                 * this packet is part of an aggregate
                 * - remove all descriptors belonging to this frame from
                 *   software queue
                 * - add it to block ack window
                 * - set up descriptors for aggregation
                 */
                list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
                ath_tx_addto_baw(sc, tid, bf);

                list_for_each_entry(tbf, &bf_head, list) {
                        ath9k_hw_set11n_aggr_middle(sc->sc_ah,
                                                    tbf->bf_desc, ndelim);
                }

                /*
                 * link buffers of this frame to the aggregate
                 */
                list_splice_tail_init(&bf_head, bf_q);
                nframes++;

                if (bf_prev) {
                        bf_prev->bf_next = bf;
                        bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
                }
                bf_prev = bf;

#ifdef AGGR_NOSHORT
                /*
                 * terminate aggregation on a small packet boundary
                 */
                if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
                        status = ATH_AGGR_SHORTPKT;
                        break;
                }
#endif
        } while (!list_empty(&tid->buf_q));

        bf_first->bf_al = al;
        bf_first->bf_nframes = nframes;
        *bf_last = bf_prev;
        return status;
#undef PADBYTES
}

/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */
static void ath_tx_sched_aggr(struct ath_softc *sc,
                              struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
        enum ATH_AGGR_STATUS status;
        struct list_head bf_q;
        struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
        int prev_frames = 0;

        do {
                if (list_empty(&tid->buf_q))
                        return;

                INIT_LIST_HEAD(&bf_q);

                status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
                                          &prev_frames);

                /*
                 * no frames picked up to be aggregated; block-ack
                 * window is not open
                 */
                if (list_empty(&bf_q))
                        break;

                bf = list_first_entry(&bf_q, struct ath_buf, list);
                bf_last = list_entry(bf_q.prev, struct ath_buf, list);
                bf->bf_lastbf = bf_last;

                /*
                 * if only one frame, send as non-aggregate
                 */
                if (bf->bf_nframes == 1) {
                        ASSERT(bf->bf_lastfrm == bf_last);

                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        /*
                         * clear aggr bits for every descriptor
                         * XXX TODO: is there a way to optimize it?
                         */
                        list_for_each_entry(tbf, &bf_q, list) {
                                ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
                        }

                        ath_buf_set_rate(sc, bf);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }

                /*
                 * setup first desc with rate and aggr info
                 */
                bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

                /*
                 * anchor last frame of aggregate correctly
                 */
                ASSERT(bf_lastaggr);
                ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
                tbf = bf_lastaggr;
                ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

                /* XXX: We don't enter into this loop, consider removing this */
                while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
                        tbf = list_entry(tbf->list.next, struct ath_buf, list);
                        ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
                }

                txq->axq_aggr_depth++;

                /*
                 * Normal aggregate, queue to hardware
                 */
                ath_tx_txqaddbuf(sc, txq, &bf_q);

        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
}

/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
                          struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
        struct ath_buf *bf;
        struct list_head bf_head;
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&tid->buf_q))
                        break;
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

                list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

                /* update baw for software retried frame */
                if (bf_isretried(bf))
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);

                /*
                 * do not indicate packets while holding txq spinlock.
                 * unlock is intentional here
                 */
                spin_unlock(&txq->axq_lock);

                /* complete this sub-frame */
                ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

                spin_lock(&txq->axq_lock);
        }

        /*
         * TODO: For frame(s) that are in the retry state, we will reuse the
         * sequence number(s) without setting the retry bit. The
         * alternative is to give up on these and BAR the receiver's window
         * forward.
         */
        tid->seq_next = tid->seq_start;
        tid->baw_tail = tid->baw_head;
}

/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */
static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                list_del(&ac->list);
                ac->sched = false;
                list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
                        list_del(&tid->list);
                        tid->sched = false;
                        ath_tid_drain(sc, txq, tid);
                }
        }
}

static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
                               struct sk_buff *skb,
                               struct ath_tx_control *txctl)
{
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath_tx_info_priv *tx_info_priv;
        int hdrlen;
        __le16 fc;

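        /*
         * The tx path can run in atomic (softirq) context where sleeping
         * is not allowed, hence GFP_ATOMIC rather than GFP_KERNEL here -
         * the change this revision of the file is titled after.
         */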
        tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
        if (unlikely(!tx_info_priv))
                return -ENOMEM;
        tx_info->rate_driver_data[0] = tx_info_priv;
        hdrlen = ieee80211_get_hdrlen_from_skb(skb);
        fc = hdr->frame_control;

        ATH_TXBUF_RESET(bf);

        /* Frame type */

        bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

        ieee80211_is_data(fc) ?
                (bf->bf_state.bf_type |= BUF_DATA) :
                (bf->bf_state.bf_type &= ~BUF_DATA);
        ieee80211_is_back_req(fc) ?
                (bf->bf_state.bf_type |= BUF_BAR) :
                (bf->bf_state.bf_type &= ~BUF_BAR);
        ieee80211_is_pspoll(fc) ?
                (bf->bf_state.bf_type |= BUF_PSPOLL) :
                (bf->bf_state.bf_type &= ~BUF_PSPOLL);
        (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
                (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
                (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
        (sc->hw->conf.ht.enabled && !is_pae(skb) &&
         (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
                (bf->bf_state.bf_type |= BUF_HT) :
                (bf->bf_state.bf_type &= ~BUF_HT);

        bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

        /* Crypto */

        bf->bf_keytype = get_hw_crypto_keytype(skb);

        if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
                bf->bf_frmlen += tx_info->control.hw_key->icv_len;
                bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
        } else {
                bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
        }

        /* Assign seqno, tidno */

        if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
                assign_aggr_tid_seqno(skb, bf);

        /* DMA setup */

        bf->bf_mpdu = skb;

        bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
                                           skb->len, PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
                bf->bf_mpdu = NULL;
                DPRINTF(sc, ATH_DBG_CONFIG,
                        "pci_dma_mapping_error() on TX\n");
                return -ENOMEM;
        }

        bf->bf_buf_addr = bf->bf_dmacontext;
        return 0;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_control *txctl)
{
        struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_node *an = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        struct ath_atx_tid *tid;
        struct ath_hal *ah = sc->sc_ah;
        int frm_type;

        frm_type = get_hw_packet_type(skb);

        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);

        /* setup descriptor */

        ds = bf->bf_desc;
        ds->ds_link = 0;
        ds->ds_data = bf->bf_buf_addr;

        /* Formulate first tx descriptor with tx controls */

        ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
                               bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

        ath9k_hw_filltxdesc(ah, ds,
                            skb->len,   /* segment length */
                            true,       /* first segment */
                            true,       /* last segment */
                            ds);        /* first descriptor */

        bf->bf_lastfrm = bf;

        spin_lock_bh(&txctl->txq->axq_lock);

        if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
            tx_info->control.sta) {
                an = (struct ath_node *)tx_info->control.sta->drv_priv;
                tid = ATH_AN_2_TID(an, bf->bf_tidno);

                if (ath_aggr_query(sc, an, bf->bf_tidno)) {
                        /*
                         * Try aggregation if it's a unicast data frame
                         * and the destination is HT capable.
                         */
                        ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
                } else {
                        /*
                         * Send this frame as regular when ADDBA
                         * exchange is neither complete nor pending.
                         */
                        ath_tx_send_normal(sc, txctl->txq,
                                           tid, &bf_head);
                }
        } else {
                bf->bf_lastbf = bf;
                bf->bf_nframes = 1;

                ath_buf_set_rate(sc, bf);
                ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
        }

        spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
        struct ath_buf *bf;
        int r;

        /* Check if a tx buffer is available */

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
                return -1;
        }

        r = ath_tx_setup_buffer(sc, bf, skb, txctl);
        if (unlikely(r)) {
                struct ath_txq *txq = txctl->txq;

                DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");

                /* upon ath_tx_processq() this TX queue will be resumed, we
                 * guarantee this will happen by knowing beforehand that
                 * we will at least have to run TX completion on one buffer
                 * on the queue */
                spin_lock_bh(&txq->axq_lock);
                if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
                        ieee80211_stop_queue(sc->hw,
                                             skb_get_queue_mapping(skb));
                        txq->stopped = 1;
                }
                spin_unlock_bh(&txq->axq_lock);

                spin_lock_bh(&sc->sc_txbuflock);
                list_add_tail(&bf->list, &sc->sc_txbuf);
                spin_unlock_bh(&sc->sc_txbuflock);

                return r;
        }

        ath_tx_start_dma(sc, bf, txctl);

        return 0;
}
1833
1834/* Initialize TX queue and h/w */
1835
1836int ath_tx_init(struct ath_softc *sc, int nbufs)
1837{
1838 int error = 0;
1839
1840 do {
1841 spin_lock_init(&sc->sc_txbuflock);
1842
1843 /* Setup tx descriptors */
1844 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
556bb8f1 1845 "tx", nbufs, 1);
f078f209
LR
1846 if (error != 0) {
1847 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638
S
1848 "Failed to allocate tx descriptors: %d\n",
1849 error);
f078f209
LR
1850 break;
1851 }
1852
1853 /* XXX allocate beacon state together with vap */
1854 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
1855 "beacon", ATH_BCBUF, 1);
1856 if (error != 0) {
1857 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638
S
1858 "Failed to allocate beacon descriptors: %d\n",
1859 error);
f078f209
LR
1860 break;
1861 }
1862
1863 } while (0);
1864
1865 if (error != 0)
1866 ath_tx_cleanup(sc);
1867
1868 return error;
1869}
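
The do { } while (0) block above is just a structured early-exit: either descriptor setup can break out to the shared error path, where ath_tx_cleanup() releases whatever was allocated before the failure.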
1870
1871/* Reclaim all tx queue resources */
1872
1873int ath_tx_cleanup(struct ath_softc *sc)
1874{
1875 /* cleanup beacon descriptors */
1876 if (sc->sc_bdma.dd_desc_len != 0)
1877 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
1878
1879 /* cleanup tx descriptors */
1880 if (sc->sc_txdma.dd_desc_len != 0)
1881 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
1882
1883 return 0;
1884}
1885
1886/* Setup a h/w transmit queue */
1887
1888struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1889{
1890 struct ath_hal *ah = sc->sc_ah;
ea9880fb 1891 struct ath9k_tx_queue_info qi;
f078f209
LR
1892 int qnum;
1893
0345f37b 1894 memset(&qi, 0, sizeof(qi));
f078f209
LR
1895 qi.tqi_subtype = subtype;
1896 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1897 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1898 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
ea9880fb 1899 qi.tqi_physCompBuf = 0;
f078f209
LR
1900
1901 /*
1902 * Enable interrupts only for EOL and DESC conditions.
1903 * We mark tx descriptors to receive a DESC interrupt
1904 * when a tx queue gets deep; otherwise we wait for the
1905 * EOL to reap descriptors. Note that this is done to
1906 * reduce interrupt load, and it only defers reaping
1907 * descriptors, never transmitting frames. Aside from
1908 * reducing interrupts this also permits more concurrency.
1909 * The only potential downside is if the tx queue backs
1910 * up, in which case the top half of the kernel may back
1911 * up due to a lack of tx descriptors.
1912 *
1913 * The UAPSD queue is an exception, since we take a desc-
1914 * based intr on the EOSP frames.
1915 */
1916 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1917 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1918 else
1919 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1920 TXQ_FLAG_TXDESCINT_ENABLE;
1921 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1922 if (qnum == -1) {
1923 /*
1924 * NB: don't print a message, this happens
1925 * normally on parts with too few tx queues
1926 */
1927 return NULL;
1928 }
1929 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1930 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638
S
1931 "qnum %u out of range, max %u!\n",
1932 qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
f078f209
LR
1933 ath9k_hw_releasetxqueue(ah, qnum);
1934 return NULL;
1935 }
1936 if (!ATH_TXQ_SETUP(sc, qnum)) {
1937 struct ath_txq *txq = &sc->sc_txq[qnum];
1938
1939 txq->axq_qnum = qnum;
1940 txq->axq_link = NULL;
1941 INIT_LIST_HEAD(&txq->axq_q);
1942 INIT_LIST_HEAD(&txq->axq_acq);
1943 spin_lock_init(&txq->axq_lock);
1944 txq->axq_depth = 0;
1945 txq->axq_aggr_depth = 0;
1946 txq->axq_totalqueued = 0;
f078f209
LR
1947 txq->axq_linkbuf = NULL;
1948 sc->sc_txqsetup |= 1<<qnum;
1949 }
1950 return &sc->sc_txq[qnum];
1951}
1952
1953/* Reclaim resources for a setup queue */
1954
1955void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1956{
1957 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1958 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1959}
1960
1961/*
1962 * Set up a hardware data transmit queue for the specified
1963 * access category. The HAL may not support all requested
1964 * queues, in which case it will return a reference to a
1965 * previously set up queue. We record the mapping from ACs
1966 * to h/w queues for use by ath_tx_start and also track
1967 * the set of h/w queues being used to optimize work in the
1968 * transmit interrupt handler and related routines.
1969 */
1970
1971int ath_tx_setup(struct ath_softc *sc, int haltype)
1972{
1973 struct ath_txq *txq;
1974
1975 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1976 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638
S
1977 "HAL AC %u out of range, max %zu!\n",
1978 haltype, ARRAY_SIZE(sc->sc_haltype2q));
f078f209
LR
1979 return 0;
1980 }
1981 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1982 if (txq != NULL) {
1983 sc->sc_haltype2q[haltype] = txq->axq_qnum;
1984 return 1;
1985 } else
1986 return 0;
1987}
1988
1989int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
1990{
1991 int qnum;
1992
1993 switch (qtype) {
1994 case ATH9K_TX_QUEUE_DATA:
1995 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1996 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638 1997 "HAL AC %u out of range, max %zu!\n",
f078f209
LR
1998 haltype, ARRAY_SIZE(sc->sc_haltype2q));
1999 return -1;
2000 }
2001 qnum = sc->sc_haltype2q[haltype];
2002 break;
2003 case ATH9K_TX_QUEUE_BEACON:
2004 qnum = sc->sc_bhalq;
2005 break;
2006 case ATH9K_TX_QUEUE_CAB:
2007 qnum = sc->sc_cabq->axq_qnum;
2008 break;
2009 default:
2010 qnum = -1;
2011 }
2012 return qnum;
2013}
2014
528f0c6b
S
2015/* Get a transmit queue, if available */
2016
2017struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
2018{
2019 struct ath_txq *txq = NULL;
2020 int qnum;
2021
2022 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
2023 txq = &sc->sc_txq[qnum];
2024
2025 spin_lock_bh(&txq->axq_lock);
2026
2027 /* Try to avoid running out of descriptors */
2028 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
2029 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638
S
2030 "TX queue: %d is full, depth: %d\n",
2031 qnum, txq->axq_depth);
528f0c6b
S
2032 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
2033 txq->stopped = 1;
2034 spin_unlock_bh(&txq->axq_lock);
2035 return NULL;
2036 }
2037
2038 spin_unlock_bh(&txq->axq_lock);
2039
2040 return txq;
2041}
2042
f078f209
LR
2043/* Update parameters for a transmit queue */
2044
ea9880fb
S
2045int ath_txq_update(struct ath_softc *sc, int qnum,
2046 struct ath9k_tx_queue_info *qinfo)
f078f209
LR
2047{
2048 struct ath_hal *ah = sc->sc_ah;
2049 int error = 0;
ea9880fb 2050 struct ath9k_tx_queue_info qi;
f078f209
LR
2051
2052 if (qnum == sc->sc_bhalq) {
2053 /*
2054 * XXX: for beacon queue, we just save the parameter.
2055 * It will be picked up by ath_beaconq_config when
2056 * it's necessary.
2057 */
ea9880fb 2058 sc->sc_beacon_qi = *qinfo;
f078f209
LR
2059 return 0;
2060 }
2061
2062 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2063
ea9880fb
S
2064 ath9k_hw_get_txq_props(ah, qnum, &qi);
2065 qi.tqi_aifs = qinfo->tqi_aifs;
2066 qi.tqi_cwmin = qinfo->tqi_cwmin;
2067 qi.tqi_cwmax = qinfo->tqi_cwmax;
2068 qi.tqi_burstTime = qinfo->tqi_burstTime;
2069 qi.tqi_readyTime = qinfo->tqi_readyTime;
f078f209 2070
ea9880fb 2071 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
f078f209 2072 DPRINTF(sc, ATH_DBG_FATAL,
04bd4638 2073 "Unable to update hardware queue %u!\n", qnum);
f078f209
LR
2074 error = -EIO;
2075 } else {
2076 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2077 }
2078
2079 return error;
2080}
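
For illustration, this is the path by which EDCA/WMM parameters reach the hardware. A minimal sketch with made-up values; ath_example_conf_tx is a hypothetical helper, not part of the driver.

/* Hypothetical sketch: push example WMM parameters to the
 * best-effort data queue. The values are illustrative only. */
static int ath_example_conf_tx(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA,
				   ATH9K_WME_AC_BE);

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 3;	/* AIFSN */
	qi.tqi_cwmin = 15;	/* CWmin, 2^4 - 1 */
	qi.tqi_cwmax = 1023;	/* CWmax, 2^10 - 1 */
	qi.tqi_burstTime = 0;	/* no TXOP bursting */

	return ath_txq_update(sc, qnum, &qi);
}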
2081
2082int ath_cabq_update(struct ath_softc *sc)
2083{
ea9880fb 2084 struct ath9k_tx_queue_info qi;
f078f209
LR
2085 int qnum = sc->sc_cabq->axq_qnum;
2086 struct ath_beacon_config conf;
2087
ea9880fb 2088 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209
LR
2089 /*
2090 * Ensure the readytime percentage is within bounds.
2091 */
2092 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2093 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2094 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2095 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2096
2097 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2098 qi.tqi_readyTime =
2099 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2100 ath_txq_update(sc, qnum, &qi);
2101
2102 return 0;
2103}
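
As a worked example of the readytime computation above: assuming beacon_interval is 100 TU and cabqReadytime has been clamped to 10 percent, tqi_readyTime = (100 * 10) / 100 = 10, i.e. the CAB queue gets roughly 10 time units of transmission after each beacon.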
2104
f078f209
LR
2105/* Deferred processing of transmit interrupt */
2106
2107void ath_tx_tasklet(struct ath_softc *sc)
2108{
1fe1132b 2109 int i;
f078f209
LR
2110 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2111
2112 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2113
2114 /*
2115 * Process each active queue.
2116 */
2117 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2118 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
1fe1132b 2119 ath_tx_processq(sc, &sc->sc_txq[i]);
f078f209 2120 }
f078f209
LR
2121}
2122
2123void ath_tx_draintxq(struct ath_softc *sc,
2124 struct ath_txq *txq, bool retry_tx)
2125{
2126 struct ath_buf *bf, *lastbf;
2127 struct list_head bf_head;
2128
2129 INIT_LIST_HEAD(&bf_head);
2130
2131 /*
2132 * NB: this assumes output has been stopped and
2133 * we do not need to block ath_tx_tasklet
2134 */
2135 for (;;) {
2136 spin_lock_bh(&txq->axq_lock);
2137
2138 if (list_empty(&txq->axq_q)) {
2139 txq->axq_link = NULL;
2140 txq->axq_linkbuf = NULL;
2141 spin_unlock_bh(&txq->axq_lock);
2142 break;
2143 }
2144
2145 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2146
2147 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2148 list_del(&bf->list);
2149 spin_unlock_bh(&txq->axq_lock);
2150
2151 spin_lock_bh(&sc->sc_txbuflock);
2152 list_add_tail(&bf->list, &sc->sc_txbuf);
2153 spin_unlock_bh(&sc->sc_txbuflock);
2154 continue;
2155 }
2156
2157 lastbf = bf->bf_lastbf;
2158 if (!retry_tx)
2159 lastbf->bf_desc->ds_txstat.ts_flags =
2160 ATH9K_TX_SW_ABORTED;
2161
2162 /* remove ath_bufs of the same MPDU from the txq */
2163 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2164 txq->axq_depth--;
2165
2166 spin_unlock_bh(&txq->axq_lock);
2167
cd3d39a6 2168 if (bf_isampdu(bf))
f078f209
LR
2169 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2170 else
2171 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2172 }
2173
2174 /* flush any pending frames if aggregation is enabled */
672840ac 2175 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2176 if (!retry_tx) {
2177 spin_lock_bh(&txq->axq_lock);
b5aa9bf9 2178 ath_txq_drain_pending_buffers(sc, txq);
f078f209
LR
2179 spin_unlock_bh(&txq->axq_lock);
2180 }
2181 }
2182}
2183
2184/* Drain the transmit queues and reclaim resources */
2185
2186void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2187{
2188 /* Stop the beacon queue. The beacon will be freed when
2189 * we go to the INIT state */
672840ac 2190 if (!(sc->sc_flags & SC_OP_INVALID)) {
f078f209 2191 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
04bd4638 2192 DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
f078f209
LR
2193 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2194 }
2195
2196 ath_drain_txdataq(sc, retry_tx);
2197}
2198
2199u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2200{
2201 return sc->sc_txq[qnum].axq_depth;
2202}
2203
2204u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2205{
2206 return sc->sc_txq[qnum].axq_aggr_depth;
2207}
2208
ccc75c52 2209bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
f078f209
LR
2210{
2211 struct ath_atx_tid *txtid;
f078f209 2212
672840ac 2213 if (!(sc->sc_flags & SC_OP_TXAGGR))
ccc75c52 2214 return false;
f078f209 2215
f078f209
LR
2216 txtid = ATH_AN_2_TID(an, tidno);
2217
a37c2c79
S
2218 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2219 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
f078f209
LR
2220 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2221 txtid->addba_exchangeattempts++;
ccc75c52 2222 return true;
f078f209
LR
2223 }
2224 }
2225
ccc75c52 2226 return false;
f078f209
LR
2227}
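
Note that a true return also consumes one of the ADDBA_EXCHANGE_ATTEMPTS, so the check doubles as the attempt counter. A call-site sketch follows; the actual mac80211 BA-session call is deliberately omitted, since naming it here would be an assumption.

/* Sketch of the intended call pattern: ask once per decision
 * point, since a true result records another ADDBA attempt. */
if (ath_tx_aggr_check(sc, an, tidno)) {
	/* hand this TID to mac80211 to run the ADDBA handshake;
	 * the exact entry point varies by kernel version */
}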
2228
2229/* Start TX aggregation */
2230
b5aa9bf9
S
2231int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
2232 u16 tid, u16 *ssn)
f078f209
LR
2233{
2234 struct ath_atx_tid *txtid;
2235 struct ath_node *an;
2236
b5aa9bf9 2237 an = (struct ath_node *)sta->drv_priv;
f078f209 2238
672840ac 2239 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209 2240 txtid = ATH_AN_2_TID(an, tid);
a37c2c79 2241 txtid->state |= AGGR_ADDBA_PROGRESS;
f078f209
LR
2242 ath_tx_pause_tid(sc, txtid);
2243 }
2244
2245 return 0;
2246}
2247
2248/* Stop tx aggregation */
2249
b5aa9bf9 2250int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
f078f209 2251{
b5aa9bf9 2252 struct ath_node *an = (struct ath_node *)sta->drv_priv;
f078f209
LR
2253
2254 ath_tx_aggr_teardown(sc, an, tid);
2255 return 0;
2256}
2257
8469cdef
S
2258/* Resume tx aggregation */
2259
2260void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2261{
2262 struct ath_atx_tid *txtid;
2263 struct ath_node *an;
2264
2265 an = (struct ath_node *)sta->drv_priv;
2266
2267 if (sc->sc_flags & SC_OP_TXAGGR) {
2268 txtid = ATH_AN_2_TID(an, tid);
2269 txtid->baw_size =
2270 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
2271 txtid->state |= AGGR_ADDBA_COMPLETE;
2272 txtid->state &= ~AGGR_ADDBA_PROGRESS;
2273 ath_tx_resume_tid(sc, txtid);
2274 }
2275}
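
For scale: IEEE80211_MIN_AMPDU_BUF is 8, so a peer advertising ampdu_factor 3 resumes with a block-ack window of 8 << 3 = 64 frames, the largest window 802.11n permits.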
2276
f078f209
LR
2277/*
2278 * Performs transmit side cleanup when TID changes from aggregated to
2279 * unaggregated.
2280 * - Pause the TID and mark cleanup in progress
2281 * - Discard all retry frames from the s/w queue.
2282 */
2283
b5aa9bf9 2284void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
f078f209
LR
2285{
2286 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2287 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2288 struct ath_buf *bf;
2289 struct list_head bf_head;
2290 INIT_LIST_HEAD(&bf_head);
2291
a37c2c79 2292 if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
f078f209
LR
2293 return;
2294
a37c2c79 2295 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
f078f209
LR
2296 txtid->addba_exchangeattempts = 0;
2297 return;
2298 }
2299
2300 /* TID must be paused first */
2301 ath_tx_pause_tid(sc, txtid);
2302
2303 /* drop all software retried frames and mark this TID */
2304 spin_lock_bh(&txq->axq_lock);
2305 while (!list_empty(&txtid->buf_q)) {
2306 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
cd3d39a6 2307 if (!bf_isretried(bf)) {
f078f209
LR
2308 /*
2309 * NB: this is based on the assumption that a
2310 * software-retried frame will always stay at
2311 * the head of the software queue.
2312 */
2313 break;
2314 }
2315 list_cut_position(&bf_head,
2316 &txtid->buf_q, &bf->bf_lastfrm->list);
2317 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2318
2319 /* complete this sub-frame */
2320 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2321 }
2322
2323 if (txtid->baw_head != txtid->baw_tail) {
2324 spin_unlock_bh(&txq->axq_lock);
a37c2c79 2325 txtid->state |= AGGR_CLEANUP;
f078f209 2326 } else {
a37c2c79 2327 txtid->state &= ~AGGR_ADDBA_COMPLETE;
f078f209
LR
2328 txtid->addba_exchangeattempts = 0;
2329 spin_unlock_bh(&txq->axq_lock);
2330 ath_tx_flush_tid(sc, txtid);
2331 }
2332}
2333
2334/*
2335 * Tx scheduling logic
2336 * NB: must be called with txq lock held
2337 */
2338
2339void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2340{
2341 struct ath_atx_ac *ac;
2342 struct ath_atx_tid *tid;
2343
2344 /* nothing to schedule */
2345 if (list_empty(&txq->axq_acq))
2346 return;
2347 /*
2348 * get the first node/ac pair on the queue
2349 */
2350 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2351 list_del(&ac->list);
2352 ac->sched = false;
2353
2354 /*
2355 * process a single tid per destination
2356 */
2357 do {
2358 /* nothing to schedule */
2359 if (list_empty(&ac->tid_q))
2360 return;
2361
2362 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2363 list_del(&tid->list);
2364 tid->sched = false;
2365
2366 if (tid->paused) /* check next tid to keep h/w busy */
2367 continue;
2368
43453b33 2369 if ((txq->axq_depth % 2) == 0)
f078f209 2370 ath_tx_sched_aggr(sc, txq, tid);
f078f209
LR
2371
2372 /*
2373 * add tid to round-robin queue if more frames
2374 * are pending for the tid
2375 */
2376 if (!list_empty(&tid->buf_q))
2377 ath_tx_queue_tid(txq, tid);
2378
2379 /* only schedule one TID at a time */
2380 break;
2381 } while (!list_empty(&ac->tid_q));
2382
2383 /*
2384 * schedule AC if more TIDs need processing
2385 */
2386 if (!list_empty(&ac->tid_q)) {
2387 /*
2388 * add dest ac to txq if not already added
2389 */
2390 if (!ac->sched) {
2391 ac->sched = true;
2392 list_add_tail(&ac->list, &txq->axq_acq);
2393 }
2394 }
2395}
2396
2397/* Initialize per-node transmit state */
2398
2399void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2400{
c5170163
S
2401 struct ath_atx_tid *tid;
2402 struct ath_atx_ac *ac;
2403 int tidno, acno;
f078f209 2404
c5170163
S
2405 /*
2406 * Init per tid tx state
2407 */
2408 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2409 tidno < WME_NUM_TID;
2410 tidno++, tid++) {
2411 tid->an = an;
2412 tid->tidno = tidno;
2413 tid->seq_start = tid->seq_next = 0;
2414 tid->baw_size = WME_MAX_BA;
2415 tid->baw_head = tid->baw_tail = 0;
2416 tid->sched = false;
2417 tid->paused = false;
a37c2c79 2418 tid->state &= ~AGGR_CLEANUP;
c5170163
S
2419 INIT_LIST_HEAD(&tid->buf_q);
2420
2421 acno = TID_TO_WME_AC(tidno);
2422 tid->ac = &an->an_aggr.tx.ac[acno];
2423
2424 /* ADDBA state */
a37c2c79
S
2425 tid->state &= ~AGGR_ADDBA_COMPLETE;
2426 tid->state &= ~AGGR_ADDBA_PROGRESS;
2427 tid->addba_exchangeattempts = 0;
c5170163 2428 }
f078f209 2429
c5170163
S
2430 /*
2431 * Init per ac tx state
2432 */
2433 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2434 acno < WME_NUM_AC; acno++, ac++) {
2435 ac->sched = false;
2436 INIT_LIST_HEAD(&ac->tid_q);
2437
2438 switch (acno) {
2439 case WME_AC_BE:
2440 ac->qnum = ath_tx_get_qnum(sc,
2441 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2442 break;
2443 case WME_AC_BK:
2444 ac->qnum = ath_tx_get_qnum(sc,
2445 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2446 break;
2447 case WME_AC_VI:
2448 ac->qnum = ath_tx_get_qnum(sc,
2449 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2450 break;
2451 case WME_AC_VO:
2452 ac->qnum = ath_tx_get_qnum(sc,
2453 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2454 break;
f078f209
LR
2455 }
2456 }
2457}
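
TID_TO_WME_AC() follows the usual 802.11e user-priority mapping, so TIDs 0 and 3 land on WME_AC_BE, 1 and 2 on WME_AC_BK, 4 and 5 on WME_AC_VI, and 6 and 7 on WME_AC_VO; that is why only the four qnum lookups above are needed.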
2458
2459 /* Clean up the pending buffers for the node */
2460
b5aa9bf9 2461void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209
LR
2462{
2463 int i;
2464 struct ath_atx_ac *ac, *ac_tmp;
2465 struct ath_atx_tid *tid, *tid_tmp;
2466 struct ath_txq *txq;
2467 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2468 if (ATH_TXQ_SETUP(sc, i)) {
2469 txq = &sc->sc_txq[i];
2470
b5aa9bf9 2471 spin_lock(&txq->axq_lock);
f078f209
LR
2472
2473 list_for_each_entry_safe(ac,
2474 ac_tmp, &txq->axq_acq, list) {
2475 tid = list_first_entry(&ac->tid_q,
2476 struct ath_atx_tid, list);
2477 if (tid && tid->an != an)
2478 continue;
2479 list_del(&ac->list);
2480 ac->sched = false;
2481
2482 list_for_each_entry_safe(tid,
2483 tid_tmp, &ac->tid_q, list) {
2484 list_del(&tid->list);
2485 tid->sched = false;
b5aa9bf9 2486 ath_tid_drain(sc, txq, tid);
a37c2c79 2487 tid->state &= ~AGGR_ADDBA_COMPLETE;
f078f209 2488 tid->addba_exchangeattempts = 0;
a37c2c79 2489 tid->state &= ~AGGR_CLEANUP;
f078f209
LR
2490 }
2491 }
2492
b5aa9bf9 2493 spin_unlock(&txq->axq_lock);
f078f209
LR
2494 }
2495 }
2496}
2497
e022edbd
JM
2498void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2499{
2500 int hdrlen, padsize;
2501 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2502 struct ath_tx_control txctl;
2503
528f0c6b
S
2504 memset(&txctl, 0, sizeof(struct ath_tx_control));
2505
e022edbd
JM
2506 /*
2507 * As a temporary workaround, assign seq# here; this will likely need
2508 * to be cleaned up to work better with Beacon transmission and virtual
2509 * BSSes.
2510 */
2511 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2512 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2513 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2514 sc->seq_no += 0x10;
2515 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2516 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2517 }
2518
2519 /* Add the padding after the header if this is not already done */
2520 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2521 if (hdrlen & 3) {
2522 padsize = hdrlen % 4;
2523 if (skb_headroom(skb) < padsize) {
04bd4638 2524 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
e022edbd
JM
2525 dev_kfree_skb_any(skb);
2526 return;
2527 }
2528 skb_push(skb, padsize);
2529 memmove(skb->data, skb->data + padsize, hdrlen);
2530 }
2531
528f0c6b
S
2532 txctl.txq = sc->sc_cabq;
2533
04bd4638 2534 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
e022edbd 2535
528f0c6b 2536 if (ath_tx_start(sc, skb, &txctl) != 0) {
04bd4638 2537 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
528f0c6b 2538 goto exit;
e022edbd 2539 }
e022edbd 2540
528f0c6b
S
2541 return;
2542exit:
2543 dev_kfree_skb_any(skb);
2544}