/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#define BITS_PER_BYTE		8
#define OFDM_PLCP_BITS		22
#define HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define L_STF			8
#define L_LTF			8
#define L_SIG			4
#define HT_SIG			8
#define HT_STF			4
#define HT_LTF(_ns)		(4 * (_ns))
#define SYMBOL_TIME(_ns)	((_ns) << 2)		/* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)	/* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME		16

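/*
 * Worked example (illustrative, not from the original source):
 * SYMBOL_TIME() and SYMBOL_TIME_HALFGI() convert a symbol count into
 * microseconds.  A long-GI symbol lasts 4 us, so SYMBOL_TIME(23) =
 * 23 << 2 = 92 us.  A half-GI symbol lasts 3.6 us, which the
 * integer-only macro approximates: SYMBOL_TIME_HALFGI(23) =
 * (23 * 18 + 4) / 5 = 83 us (exact value 82.8 us).
 */
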
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

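/*
 * Worked example (illustrative): bits_per_symbol[7][1] = 540, i.e.
 * MCS 7 on a 40 MHz channel carries 540 bits per symbol -- 135 Mbps
 * with the 4 us long-GI symbol, 150 Mbps with the 3.6 us half-GI
 * symbol.  Rows 8-15 repeat the pattern for two spatial streams.
 */
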
/*
 * Insert a chain of ath_buf (descriptors) on a txq;
 * assumes the descriptors are already chained together by the caller.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: txq depth = %d\n", __func__, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: TXDP[%u] = %llx (%p)\n",
			__func__, txq->axq_qnum,
			ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
			__func__,
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	DPRINTF(sc, ATH_DBG_XMIT,
		"%s: TX complete: skb: %p\n", __func__, skb);

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries + 1;

	ieee80211_tx_status(hw, skb);
}

/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}

/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static bool is_pae(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			return true;
		}
	}

	return false;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !is_pae(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * IEEE80211 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->sc_txbuflock);

	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->sc_txbuflock);

	return bf;
}

/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_bh(&sc->sc_txbuflock);
	list_splice_tail_init(bf_q, &sc->sc_txbuf);
	spin_unlock_bh(&sc->sc_txbuflock);
}

/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

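/*
 * Worked example (illustrative): with seq_start = 100 and the slot for
 * seqno 100 at baw_head, completing seqno 100 clears that slot and the
 * loop above slides the window (seq_start -> 101, baw_head advances)
 * until it hits the first still-outstanding slot.  Completing seqno
 * 102 first only clears its slot; the window stays anchored at 100
 * until 100 and 101 complete as well.
 */
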
/*
 * ath_pkt_duration - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - to use 4 us vs. 3.6 us for symbol time
 */

static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}

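/*
 * Worked example (illustrative): a 1500-byte MPDU at single-stream
 * MCS 7, 40 MHz, long GI.  nbits = 1500 * 8 + 22 = 12022, nsymbits =
 * bits_per_symbol[7][1] = 540, nsymbols = ceil(12022 / 540) = 23, so
 * the data portion lasts SYMBOL_TIME(23) = 92 us.  The preamble terms
 * add 8 + 8 + 4 + 8 + 4 + HT_LTF(1) = 36 us, for 128 us in total.
 */
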
/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ieee80211_hdr *hdr;
	int i, flags, rtsctsena = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	__le16 fc;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = (struct sk_buff *)bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	if (tx_info->control.sta)
		an = (struct ath_node *)tx_info->control.sta->drv_priv;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		rates[0].count = ATH_TXMAXTRY;
	}

	/* get the cix for the lowest valid rix */
	rt = sc->hw_rate_table[sc->sc_curmode];
	for (i = 3; i >= 0; i--) {
		if (rates[i].count && (rates[i].idx >= 0)) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].ctrl_rate;

	/*
	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
	 * just CTS. Note that this is only done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
	    && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
		WLAN_RC_PHY_HT(rt->info[rix].phy))) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].ctrl_rate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for hw retried frames.
	 * We enable the global flag here and let rate series flags determine
	 * which rates will actually use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/* 802.11g protection not needed, use our default behavior */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/* Set protection if aggregate protection on */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].ctrl_rate;
		rtsctsena = 1;
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
		flags &= ~(ATH9K_TXDESC_RTSENA);

	/*
	 * CTS transmit rate is derived from the transmit rate by looking in the
	 * h/w rate table. We must also factor in whether or not a short
	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
	 */
	ctsrate = rt->info[cix].ratecode |
		(bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		if (bf_isht(bf) && an)
			series[i].ChSel = ath_chainmask_sel_logic(sc, an);
		else
			series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}

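/*
 * Note (illustrative): the four series[] entries programmed above form
 * the hardware's multi-rate retry chain.  With, say, rates[] = {MCS 7
 * x2, MCS 4 x2, MCS 1 x3, lowest x3}, the chip retries each rate
 * series[i].Tries times before falling back to the next, so a frame
 * can drop to a more robust rate without software intervention.
 */
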
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}

/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when BA
				 * issue happens. Chip needs to be reset.
				 * But AP code may have synchronization issues
				 * when performing an internal reset in this
				 * routine. Only enable reset in STA mode
				 * for now.
				 */
				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {

			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) { /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */

			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
					struct ath_buf, list);
				list_cut_position(&bf_head,
					bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe has only one
				 * descriptor, which is also being used as
				 * the holding descriptor, then the ath_buf
				 * is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
				bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is the holding
			 * descriptor, then in order to requeue the frame
			 * to the software queue, we need to allocate a new
			 * descriptor and copy the content of the holding
			 * descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->sc_txbuflock);
				ASSERT(!list_empty(&sc->sc_txbuf));
				tbf = list_first_entry(&sc->sc_txbuf,
					struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->sc_txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we do _not_ add to
		 * the end of the queue ! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);

	return;
}

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			       sizeof(tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
		}
	}
}

/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Let's work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;	/* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

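/*
 * Illustrative timeline for the holding ("STALE") descriptor logic in
 * ath_tx_processq() above:
 *   1. hw completes descriptor D1 and raises an interrupt;
 *   2. sw chains D2 behind D1 and writes TxE;
 *   3. the completion BH runs before hw has re-read D1->ds_link.
 * If the BH recycled D1 immediately, hw could follow a stale link, so
 * D1 stays on axq_q marked ATH_BUFSTATUS_STALE and is only returned to
 * sc_txbuf on a later pass.
 */
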
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
		__func__, txq->axq_qnum,
		ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
						sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset hardware; hal status %u\n",
				__func__,
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}

/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}

/*
 * looks up the rate
 * returns aggr limit based on lowest of the rates
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate, if rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16-bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}

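/*
 * Worked example (illustrative): at 135 Mbps a 4 ms burst carries
 * about 67,500 bytes, already beyond the 16-bit hardware limit of
 * 65,535; the min() chain above then caps the aggregate at
 * ATH_AMPDU_LIMIT_DEFAULT or the peer's advertised maxampdu,
 * whichever is smaller.
 */
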
/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * caller should make sure that the rate is an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->hw_rate_table[sc->sc_curmode];
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

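/*
 * Worked example (illustrative): a peer advertising a 4 us MPDU
 * density, with the first rate MCS 7 / 40 MHz / long GI, gives
 * nsymbols = 4 >> 2 = 1, nsymbits = 540 and minlen = 540 / 8 = 67
 * bytes.  A 40-byte subframe then needs mindelim = (67 - 40) /
 * ATH_AGGR_DELIM_SZ extra delimiters to stretch it to the minimum
 * spacing.
 */
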
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
		al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}

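/*
 * Worked example (illustrative): PADBYTES() rounds a subframe up to
 * the next 4-byte boundary, e.g. PADBYTES(1537) = (4 - 1537 % 4) % 4 =
 * 3.  With ndelim = 6, the next subframe is preceded by bpad = 3 +
 * (6 << 2) = 27 bytes of padding plus delimiters, which the running
 * aggregate length 'al' accounts for on the next iteration.
 */
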
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */

static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
				struct sk_buff *skb,
				struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	__le16 fc;

	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL);
	tx_info->rate_driver_data[0] = tx_info_priv;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled && !is_pae(skb) &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Assign seqno, tidno */

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;
	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	bf->bf_buf_addr = bf->bf_dmacontext;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds);	/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
			__func__);
		return -1;
	}

	ath_tx_setup_buffer(sc, bf, skb, txctl);
	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->sc_txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate tx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate "
				"beacon descriptors: %d\n",
				__func__, error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	return 0;
}

1854/* Setup a h/w transmit queue */
1855
1856struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1857{
1858 struct ath_hal *ah = sc->sc_ah;
ea9880fb 1859 struct ath9k_tx_queue_info qi;
f078f209
LR
1860 int qnum;
1861
0345f37b 1862 memset(&qi, 0, sizeof(qi));
f078f209
LR
1863 qi.tqi_subtype = subtype;
1864 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1865 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1866 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
ea9880fb 1867 qi.tqi_physCompBuf = 0;
f078f209
LR
1868
1869 /*
1870 * Enable interrupts only for EOL and DESC conditions.
 1871 * We mark tx descriptors to receive a DESC interrupt
 1872 * when a tx queue gets deep; otherwise we wait for the
 1873 * EOL interrupt to reap descriptors. Note that this is
 1874 * done to reduce interrupt load; it only defers reaping
 1875 * descriptors, never transmitting frames. Aside from
 1876 * reducing interrupts this also permits more concurrency.
 1877 * The only potential downside is that if the tx queue
 1878 * backs up, the top half of the kernel may also back up
 1879 * due to a lack of tx descriptors.
1880 *
1881 * The UAPSD queue is an exception, since we take a desc-
1882 * based intr on the EOSP frames.
1883 */
1884 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1885 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1886 else
1887 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1888 TXQ_FLAG_TXDESCINT_ENABLE;
1889 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1890 if (qnum == -1) {
1891 /*
 1892 * NB: don't print a message; this happens
 1893 * normally on parts with too few tx queues.
1894 */
1895 return NULL;
1896 }
1897 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1898 DPRINTF(sc, ATH_DBG_FATAL,
1899 "%s: hal qnum %u out of range, max %u!\n",
1900 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
1901 ath9k_hw_releasetxqueue(ah, qnum);
1902 return NULL;
1903 }
1904 if (!ATH_TXQ_SETUP(sc, qnum)) {
1905 struct ath_txq *txq = &sc->sc_txq[qnum];
1906
1907 txq->axq_qnum = qnum;
1908 txq->axq_link = NULL;
1909 INIT_LIST_HEAD(&txq->axq_q);
1910 INIT_LIST_HEAD(&txq->axq_acq);
1911 spin_lock_init(&txq->axq_lock);
1912 txq->axq_depth = 0;
1913 txq->axq_aggr_depth = 0;
1914 txq->axq_totalqueued = 0;
f078f209
LR
1915 txq->axq_linkbuf = NULL;
1916 sc->sc_txqsetup |= 1<<qnum;
1917 }
1918 return &sc->sc_txq[qnum];
1919}
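
The sc_txqsetup word updated above is a per-queue bitmap shared by ath_txq_setup, the ATH_TXQ_SETUP() test, and ath_tx_cleanupq below. A sketch of the assumed macro shape, reconstructed from its use here (the real definition lives in core.h, not in this file):

/* assumed shape of the bookkeeping test, per its use in this file */
#define ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1 << (i)))

/* set on first setup ...             */
sc->sc_txqsetup |= 1 << qnum;
/* ... and cleared again on teardown  */
sc->sc_txqsetup &= ~(1 << qnum);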
1920
1921/* Reclaim resources for a setup queue */
1922
1923void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1924{
1925 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1926 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1927}
1928
1929/*
1930 * Setup a hardware data transmit queue for the specified
 1931 * access category (AC). The hal may not support all requested
1932 * queues in which case it will return a reference to a
1933 * previously setup queue. We record the mapping from ac's
1934 * to h/w queues for use by ath_tx_start and also track
1935 * the set of h/w queues being used to optimize work in the
1936 * transmit interrupt handler and related routines.
1937 */
1938
1939int ath_tx_setup(struct ath_softc *sc, int haltype)
1940{
1941 struct ath_txq *txq;
1942
1943 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1944 DPRINTF(sc, ATH_DBG_FATAL,
1945 "%s: HAL AC %u out of range, max %zu!\n",
1946 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
1947 return 0;
1948 }
1949 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1950 if (txq != NULL) {
1951 sc->sc_haltype2q[haltype] = txq->axq_qnum;
1952 return 1;
1953 } else
1954 return 0;
1955}
1956
1957int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
1958{
1959 int qnum;
1960
1961 switch (qtype) {
1962 case ATH9K_TX_QUEUE_DATA:
1963 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1964 DPRINTF(sc, ATH_DBG_FATAL,
1965 "%s: HAL AC %u out of range, max %zu!\n",
1966 __func__,
1967 haltype, ARRAY_SIZE(sc->sc_haltype2q));
1968 return -1;
1969 }
1970 qnum = sc->sc_haltype2q[haltype];
1971 break;
1972 case ATH9K_TX_QUEUE_BEACON:
1973 qnum = sc->sc_bhalq;
1974 break;
1975 case ATH9K_TX_QUEUE_CAB:
1976 qnum = sc->sc_cabq->axq_qnum;
1977 break;
1978 default:
1979 qnum = -1;
1980 }
1981 return qnum;
1982}
1983
528f0c6b
S
1984/* Get a transmit queue, if available */
1985
1986struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
1987{
1988 struct ath_txq *txq = NULL;
1989 int qnum;
1990
1991 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
1992 txq = &sc->sc_txq[qnum];
1993
1994 spin_lock_bh(&txq->axq_lock);
1995
1996 /* Try to avoid running out of descriptors */
1997 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
1998 DPRINTF(sc, ATH_DBG_FATAL,
1999 "%s: TX queue: %d is full, depth: %d\n",
2000 __func__, qnum, txq->axq_depth);
2001 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
2002 txq->stopped = 1;
2003 spin_unlock_bh(&txq->axq_lock);
2004 return NULL;
2005 }
2006
2007 spin_unlock_bh(&txq->axq_lock);
2008
2009 return txq;
2010}
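
The stop at (ATH_TXBUF - 20) above needs a matching wake once completions drain the queue; that counterpart is outside this section. A minimal sketch of the assumed wake-up, where ath_get_mac80211_qnum() is taken on trust as the reverse queue mapping:

/* sketch: would run in the tx completion path, under axq_lock */
if (txq->stopped && txq->axq_depth <= (ATH_TXBUF - 20)) {
	int qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);

	if (qnum != -1) {
		ieee80211_wake_queue(sc->hw, qnum);	/* resume mac80211 feed */
		txq->stopped = 0;
	}
}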
2011
f078f209
LR
2012/* Update parameters for a transmit queue */
2013
ea9880fb
S
2014int ath_txq_update(struct ath_softc *sc, int qnum,
2015 struct ath9k_tx_queue_info *qinfo)
f078f209
LR
2016{
2017 struct ath_hal *ah = sc->sc_ah;
2018 int error = 0;
ea9880fb 2019 struct ath9k_tx_queue_info qi;
f078f209
LR
2020
2021 if (qnum == sc->sc_bhalq) {
2022 /*
2023 * XXX: for beacon queue, we just save the parameter.
2024 * It will be picked up by ath_beaconq_config when
2025 * it's necessary.
2026 */
ea9880fb 2027 sc->sc_beacon_qi = *qinfo;
f078f209
LR
2028 return 0;
2029 }
2030
2031 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2032
ea9880fb
S
2033 ath9k_hw_get_txq_props(ah, qnum, &qi);
2034 qi.tqi_aifs = qinfo->tqi_aifs;
2035 qi.tqi_cwmin = qinfo->tqi_cwmin;
2036 qi.tqi_cwmax = qinfo->tqi_cwmax;
2037 qi.tqi_burstTime = qinfo->tqi_burstTime;
2038 qi.tqi_readyTime = qinfo->tqi_readyTime;
f078f209 2039
ea9880fb 2040 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
f078f209
LR
2041 DPRINTF(sc, ATH_DBG_FATAL,
2042 "%s: unable to update hardware queue %u!\n",
2043 __func__, qnum);
2044 error = -EIO;
2045 } else {
2046 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2047 }
2048
2049 return error;
2050}
2051
2052int ath_cabq_update(struct ath_softc *sc)
2053{
ea9880fb 2054 struct ath9k_tx_queue_info qi;
f078f209
LR
2055 int qnum = sc->sc_cabq->axq_qnum;
2056 struct ath_beacon_config conf;
2057
ea9880fb 2058 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209
LR
2059 /*
2060 * Ensure the readytime % is within the bounds.
2061 */
2062 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2063 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2064 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2065 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2066
2067 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2068 qi.tqi_readyTime =
2069 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2070 ath_txq_update(sc, qnum, &qi);
2071
2072 return 0;
2073}
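
The ready time is a straight percentage of the beacon interval: tqi_readyTime = beacon_interval * cabqReadytime / 100, in whatever unit beacon_interval carries. A worked example with hypothetical values:

/* worked example (hypothetical values) */
conf.beacon_interval = 100;		/* TU */
sc->sc_config.cabqReadytime = 10;	/* percent, already clamped above */
qi.tqi_readyTime = (100 * 10) / 100;	/* = 10: CAB may transmit for 10%
					 * of each beacon interval */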
2074
f078f209
LR
2075/* Deferred processing of transmit interrupt */
2076
2077void ath_tx_tasklet(struct ath_softc *sc)
2078{
1fe1132b 2079 int i;
f078f209
LR
2080 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2081
2082 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2083
2084 /*
2085 * Process each active queue.
2086 */
2087 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2088 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
1fe1132b 2089 ath_tx_processq(sc, &sc->sc_txq[i]);
f078f209 2090 }
f078f209
LR
2091}
2092
2093void ath_tx_draintxq(struct ath_softc *sc,
2094 struct ath_txq *txq, bool retry_tx)
2095{
2096 struct ath_buf *bf, *lastbf;
2097 struct list_head bf_head;
2098
2099 INIT_LIST_HEAD(&bf_head);
2100
2101 /*
2102 * NB: this assumes output has been stopped and
2103 * we do not need to block ath_tx_tasklet
2104 */
2105 for (;;) {
2106 spin_lock_bh(&txq->axq_lock);
2107
2108 if (list_empty(&txq->axq_q)) {
2109 txq->axq_link = NULL;
2110 txq->axq_linkbuf = NULL;
2111 spin_unlock_bh(&txq->axq_lock);
2112 break;
2113 }
2114
2115 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2116
2117 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2118 list_del(&bf->list);
2119 spin_unlock_bh(&txq->axq_lock);
2120
2121 spin_lock_bh(&sc->sc_txbuflock);
2122 list_add_tail(&bf->list, &sc->sc_txbuf);
2123 spin_unlock_bh(&sc->sc_txbuflock);
2124 continue;
2125 }
2126
2127 lastbf = bf->bf_lastbf;
2128 if (!retry_tx)
2129 lastbf->bf_desc->ds_txstat.ts_flags =
2130 ATH9K_TX_SW_ABORTED;
2131
2132 /* remove ath_buf's of the same mpdu from txq */
2133 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2134 txq->axq_depth--;
2135
2136 spin_unlock_bh(&txq->axq_lock);
2137
cd3d39a6 2138 if (bf_isampdu(bf))
f078f209
LR
2139 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2140 else
2141 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2142 }
2143
2144 /* flush any pending frames if aggregation is enabled */
672840ac 2145 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2146 if (!retry_tx) {
2147 spin_lock_bh(&txq->axq_lock);
b5aa9bf9 2148 ath_txq_drain_pending_buffers(sc, txq);
f078f209
LR
2149 spin_unlock_bh(&txq->axq_lock);
2150 }
2151 }
2152}
2153
2154/* Drain the transmit queues and reclaim resources */
2155
2156void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2157{
 2158 /* Stop the beacon queue. The beacon will be freed when
 2159 * we go to the INIT state. */
672840ac 2160 if (!(sc->sc_flags & SC_OP_INVALID)) {
f078f209
LR
2161 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2162 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2163 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2164 }
2165
2166 ath_drain_txdataq(sc, retry_tx);
2167}
2168
2169u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2170{
2171 return sc->sc_txq[qnum].axq_depth;
2172}
2173
2174u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2175{
2176 return sc->sc_txq[qnum].axq_aggr_depth;
2177}
2178
ccc75c52 2179bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
f078f209
LR
2180{
2181 struct ath_atx_tid *txtid;
f078f209 2182
672840ac 2183 if (!(sc->sc_flags & SC_OP_TXAGGR))
ccc75c52 2184 return false;
f078f209 2185
f078f209
LR
2186 txtid = ATH_AN_2_TID(an, tidno);
2187
a37c2c79
S
2188 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2189 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
f078f209
LR
2190 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2191 txtid->addba_exchangeattempts++;
ccc75c52 2192 return true;
f078f209
LR
2193 }
2194 }
2195
ccc75c52 2196 return false;
f078f209
LR
2197}
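
A caller is expected to treat this as a "should we try ADDBA now?" predicate: true means the TID has no session yet, none in progress, and exchange attempts left. A hedged sketch of such a caller, with start_ba_session() as a placeholder for the real mac80211 entry point (not named here):

/* sketch: in the tx path, before queueing unicast HT data */
if (ath_tx_aggr_check(sc, an, tidno)) {
	/*
	 * No BA session on this TID yet and attempts remain, so ask
	 * the stack to run the ADDBA handshake. start_ba_session()
	 * is a placeholder, not a real function.
	 */
	start_ba_session(sc->hw, an, tidno);
}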
2198
2199/* Start TX aggregation */
2200
b5aa9bf9
S
2201int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
2202 u16 tid, u16 *ssn)
f078f209
LR
2203{
2204 struct ath_atx_tid *txtid;
2205 struct ath_node *an;
2206
b5aa9bf9 2207 an = (struct ath_node *)sta->drv_priv;
f078f209 2208
672840ac 2209 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209 2210 txtid = ATH_AN_2_TID(an, tid);
a37c2c79 2211 txtid->state |= AGGR_ADDBA_PROGRESS;
f078f209
LR
2212 ath_tx_pause_tid(sc, txtid);
2213 }
2214
2215 return 0;
2216}
2217
2218/* Stop tx aggregation */
2219
b5aa9bf9 2220int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
f078f209 2221{
b5aa9bf9 2222 struct ath_node *an = (struct ath_node *)sta->drv_priv;
f078f209
LR
2223
2224 ath_tx_aggr_teardown(sc, an, tid);
2225 return 0;
2226}
2227
8469cdef
S
2228/* Resume tx aggregation */
2229
2230void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2231{
2232 struct ath_atx_tid *txtid;
2233 struct ath_node *an;
2234
2235 an = (struct ath_node *)sta->drv_priv;
2236
2237 if (sc->sc_flags & SC_OP_TXAGGR) {
2238 txtid = ATH_AN_2_TID(an, tid);
2239 txtid->baw_size =
2240 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
2241 txtid->state |= AGGR_ADDBA_COMPLETE;
2242 txtid->state &= ~AGGR_ADDBA_PROGRESS;
2243 ath_tx_resume_tid(sc, txtid);
2244 }
2245}
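
The window sizing above shifts the minimum A-MPDU buffer (8 frames in mac80211's headers of this era) left by the peer's advertised ampdu_factor, so the usable block-ack window grows in powers of two:

/*
 * Worked example: with IEEE80211_MIN_AMPDU_BUF == 8, a peer
 * advertising ampdu_factor == 3 gets the full window:
 *
 *   8 << 0 = 8,  8 << 1 = 16,  8 << 2 = 32,  8 << 3 = 64 frames
 */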
2246
f078f209
LR
2247/*
2248 * Performs transmit side cleanup when TID changes from aggregated to
2249 * unaggregated.
 2250 * - Pause the TID and mark cleanup in progress.
 2251 * - Discard all software-retried frames from the s/w queue.
2252 */
2253
b5aa9bf9 2254void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
f078f209
LR
2255{
2256 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2257 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2258 struct ath_buf *bf;
2259 struct list_head bf_head;
2260 INIT_LIST_HEAD(&bf_head);
2261
2262 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2263
a37c2c79 2264 if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
f078f209
LR
2265 return;
2266
a37c2c79 2267 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
f078f209
LR
2268 txtid->addba_exchangeattempts = 0;
2269 return;
2270 }
2271
2272 /* TID must be paused first */
2273 ath_tx_pause_tid(sc, txtid);
2274
2275 /* drop all software retried frames and mark this TID */
2276 spin_lock_bh(&txq->axq_lock);
2277 while (!list_empty(&txtid->buf_q)) {
2278 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
cd3d39a6 2279 if (!bf_isretried(bf)) {
f078f209
LR
2280 /*
 2281 * NB: this relies on the assumption that a
 2282 * software-retried frame always stays at
 2283 * the head of the software queue.
2284 */
2285 break;
2286 }
2287 list_cut_position(&bf_head,
2288 &txtid->buf_q, &bf->bf_lastfrm->list);
2289 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2290
2291 /* complete this sub-frame */
2292 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2293 }
2294
2295 if (txtid->baw_head != txtid->baw_tail) {
2296 spin_unlock_bh(&txq->axq_lock);
a37c2c79 2297 txtid->state |= AGGR_CLEANUP;
f078f209 2298 } else {
a37c2c79 2299 txtid->state &= ~AGGR_ADDBA_COMPLETE;
f078f209
LR
2300 txtid->addba_exchangeattempts = 0;
2301 spin_unlock_bh(&txq->axq_lock);
2302 ath_tx_flush_tid(sc, txtid);
2303 }
2304}
2305
2306/*
2307 * Tx scheduling logic
2308 * NB: must be called with txq lock held
2309 */
2310
2311void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2312{
2313 struct ath_atx_ac *ac;
2314 struct ath_atx_tid *tid;
2315
2316 /* nothing to schedule */
2317 if (list_empty(&txq->axq_acq))
2318 return;
2319 /*
2320 * get the first node/ac pair on the queue
2321 */
2322 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2323 list_del(&ac->list);
2324 ac->sched = false;
2325
2326 /*
2327 * process a single tid per destination
2328 */
2329 do {
2330 /* nothing to schedule */
2331 if (list_empty(&ac->tid_q))
2332 return;
2333
2334 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2335 list_del(&tid->list);
2336 tid->sched = false;
2337
2338 if (tid->paused) /* check next tid to keep h/w busy */
2339 continue;
2340
43453b33 2341 if ((txq->axq_depth % 2) == 0)
f078f209 2342 ath_tx_sched_aggr(sc, txq, tid);
f078f209
LR
2343
2344 /*
2345 * add tid to round-robin queue if more frames
2346 * are pending for the tid
2347 */
2348 if (!list_empty(&tid->buf_q))
2349 ath_tx_queue_tid(txq, tid);
2350
2351 /* only schedule one TID at a time */
2352 break;
2353 } while (!list_empty(&ac->tid_q));
2354
2355 /*
2356 * schedule AC if more TIDs need processing
2357 */
2358 if (!list_empty(&ac->tid_q)) {
2359 /*
2360 * add dest ac to txq if not already added
2361 */
2362 if (!ac->sched) {
2363 ac->sched = true;
2364 list_add_tail(&ac->list, &txq->axq_acq);
2365 }
2366 }
2367}
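
The requeue side of this round-robin is ath_tx_queue_tid(), called above when a TID still has frames pending. Its assumed shape, reconstructed from how it is used here (a sketch, not the verbatim helper):

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused || tid->sched)	/* paused or already queued */
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)			/* AC already queued on the txq */
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}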
2368
2369/* Initialize per-node transmit state */
2370
2371void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2372{
c5170163
S
2373 struct ath_atx_tid *tid;
2374 struct ath_atx_ac *ac;
2375 int tidno, acno;
f078f209 2376
c5170163
S
2377 /*
2378 * Init per tid tx state
2379 */
2380 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2381 tidno < WME_NUM_TID;
2382 tidno++, tid++) {
2383 tid->an = an;
2384 tid->tidno = tidno;
2385 tid->seq_start = tid->seq_next = 0;
2386 tid->baw_size = WME_MAX_BA;
2387 tid->baw_head = tid->baw_tail = 0;
2388 tid->sched = false;
2389 tid->paused = false;
a37c2c79 2390 tid->state &= ~AGGR_CLEANUP;
c5170163
S
2391 INIT_LIST_HEAD(&tid->buf_q);
2392
2393 acno = TID_TO_WME_AC(tidno);
2394 tid->ac = &an->an_aggr.tx.ac[acno];
2395
2396 /* ADDBA state */
a37c2c79
S
2397 tid->state &= ~AGGR_ADDBA_COMPLETE;
2398 tid->state &= ~AGGR_ADDBA_PROGRESS;
2399 tid->addba_exchangeattempts = 0;
c5170163 2400 }
f078f209 2401
c5170163
S
2402 /*
2403 * Init per ac tx state
2404 */
2405 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2406 acno < WME_NUM_AC; acno++, ac++) {
2407 ac->sched = false;
2408 INIT_LIST_HEAD(&ac->tid_q);
2409
2410 switch (acno) {
2411 case WME_AC_BE:
2412 ac->qnum = ath_tx_get_qnum(sc,
2413 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2414 break;
2415 case WME_AC_BK:
2416 ac->qnum = ath_tx_get_qnum(sc,
2417 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2418 break;
2419 case WME_AC_VI:
2420 ac->qnum = ath_tx_get_qnum(sc,
2421 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2422 break;
2423 case WME_AC_VO:
2424 ac->qnum = ath_tx_get_qnum(sc,
2425 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2426 break;
f078f209
LR
2427 }
2428 }
2429}
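
TID_TO_WME_AC() above applies the standard 802.11e mapping of eight TIDs onto the four WMM access categories. Its assumed definition, paraphrased (the real macro lives in core.h):

/* assumed shape, per the usual 802.1D-to-WMM mapping */
#define TID_TO_WME_AC(_tid)				\
	((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :	\
	 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK :	\
	 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI :	\
	 WME_AC_VO)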
2430
 2431 /* Clean up the pending buffers for the node. */
2432
b5aa9bf9 2433void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
f078f209
LR
2434{
2435 int i;
2436 struct ath_atx_ac *ac, *ac_tmp;
2437 struct ath_atx_tid *tid, *tid_tmp;
2438 struct ath_txq *txq;
2439 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2440 if (ATH_TXQ_SETUP(sc, i)) {
2441 txq = &sc->sc_txq[i];
2442
b5aa9bf9 2443 spin_lock(&txq->axq_lock);
f078f209
LR
2444
2445 list_for_each_entry_safe(ac,
2446 ac_tmp, &txq->axq_acq, list) {
2447 tid = list_first_entry(&ac->tid_q,
2448 struct ath_atx_tid, list);
2449 if (tid && tid->an != an)
2450 continue;
2451 list_del(&ac->list);
2452 ac->sched = false;
2453
2454 list_for_each_entry_safe(tid,
2455 tid_tmp, &ac->tid_q, list) {
2456 list_del(&tid->list);
2457 tid->sched = false;
b5aa9bf9 2458 ath_tid_drain(sc, txq, tid);
a37c2c79 2459 tid->state &= ~AGGR_ADDBA_COMPLETE;
f078f209 2460 tid->addba_exchangeattempts = 0;
a37c2c79 2461 tid->state &= ~AGGR_CLEANUP;
f078f209
LR
2462 }
2463 }
2464
b5aa9bf9 2465 spin_unlock(&txq->axq_lock);
f078f209
LR
2466 }
2467 }
2468}
2469
e022edbd
JM
2470void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2471{
2472 int hdrlen, padsize;
2473 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2474 struct ath_tx_control txctl;
2475
528f0c6b
S
2476 memset(&txctl, 0, sizeof(struct ath_tx_control));
2477
e022edbd
JM
2478 /*
2479 * As a temporary workaround, assign seq# here; this will likely need
2480 * to be cleaned up to work better with Beacon transmission and virtual
2481 * BSSes.
2482 */
2483 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2484 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2485 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2486 sc->seq_no += 0x10;
2487 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2488 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2489 }
2490
2491 /* Add the padding after the header if this is not already done */
2492 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2493 if (hdrlen & 3) {
2494 padsize = hdrlen % 4;
2495 if (skb_headroom(skb) < padsize) {
2496 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
2497 "failed\n", __func__);
2498 dev_kfree_skb_any(skb);
2499 return;
2500 }
2501 skb_push(skb, padsize);
2502 memmove(skb->data, skb->data + padsize, hdrlen);
2503 }
2504
528f0c6b
S
2505 txctl.txq = sc->sc_cabq;
2506
e022edbd
JM
2507 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
2508 __func__,
2509 skb);
2510
528f0c6b
S
2511 if (ath_tx_start(sc, skb, &txctl) != 0) {
2512 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
2513 goto exit;
e022edbd 2514 }
e022edbd 2515
528f0c6b
S
2516 return;
2517exit:
2518 dev_kfree_skb_any(skb);
2519}
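
The padding logic above keeps the frame body 4-byte aligned when the 802.11 header length is not a multiple of four. A worked example for a 26-byte QoS data header:

/* hdrlen = 26 -> (26 & 3) != 0, padsize = 26 % 4 = 2 */
skb_push(skb, 2);			/* claim 2 bytes of headroom */
memmove(skb->data, skb->data + 2, 26);	/* slide the header forward; the
					 * 2 pad bytes now sit between the
					 * header and the payload */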