drivers/net/wireless/ath9k/xmit.c
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Implementation of transmit path.
 */

#include "core.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)           /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16

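/*
 * An HT OFDM symbol lasts 4 us with the regular 0.8 us guard interval
 * and 3.6 us with the half (0.4 us) guard interval. SYMBOL_TIME_HALFGI()
 * rounds up, e.g. SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us.
 */
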
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{   26,   54 },		/*  0: BPSK */
	{   52,  108 },		/*  1: QPSK 1/2 */
	{   78,  162 },		/*  2: QPSK 3/4 */
	{  104,  216 },		/*  3: 16-QAM 1/2 */
	{  156,  324 },		/*  4: 16-QAM 3/4 */
	{  208,  432 },		/*  5: 64-QAM 2/3 */
	{  234,  486 },		/*  6: 64-QAM 3/4 */
	{  260,  540 },		/*  7: 64-QAM 5/6 */
	{   52,  108 },		/*  8: BPSK */
	{  104,  216 },		/*  9: QPSK 1/2 */
	{  156,  324 },		/* 10: QPSK 3/4 */
	{  208,  432 },		/* 11: 16-QAM 1/2 */
	{  312,  648 },		/* 12: 16-QAM 3/4 */
	{  416,  864 },		/* 13: 64-QAM 2/3 */
	{  468,  972 },		/* 14: 64-QAM 3/4 */
	{  520, 1080 },		/* 15: 64-QAM 5/6 */
};
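
/*
 * Entries 0-7 are the single-stream MCS rates, 8-15 their two-stream
 * counterparts. For example, MCS 7 at 20 MHz carries 260 data bits per
 * 4 us symbol, i.e. 65 Mbps (72.2 Mbps with half GI).
 */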

#define IS_HT_RATE(_rate)	((_rate) & 0x80)

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: txq depth = %d\n", __func__, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: TXDP[%u] = %llx (%p)\n",
			__func__, txq->axq_qnum,
			ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
			__func__,
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}
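
/*
 * NB: ath_tx_txqaddbuf() either programs TXDP with the first descriptor
 * (hardware queue idle, axq_link == NULL) or appends the new chain by
 * patching ds_link of the previously last descriptor, then (re)starts
 * the queue with ath9k_hw_txstart().
 */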

/* Get transmit rate index using rate in Kbps */

static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
{
	int i;
	int ndx = 0;

	for (i = 0; i < rt->rateCount; i++) {
		if (rt->info[i].rateKbps == rate) {
			ndx = i;
			break;
		}
	}

	return ndx;
}

/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}

/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static bool check_min_rate(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	bool use_minrate = false;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
		use_minrate = true;
	} else if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			use_minrate = true;
		}
	}

	return use_minrate;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void setup_rate_retries(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rc_series *rcs;
	struct ieee80211_hdr *hdr;
	const struct ath9k_rate_table *rt;
	bool use_minrate;
	__le16 fc;
	u8 rix;

	rt = sc->sc_currates;
	BUG_ON(!rt);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif; /* HACK */
	rcs = tx_info_priv->rcs;

	/* Check if min rates have to be used */
	use_minrate = check_min_rate(skb);

	if (ieee80211_is_data(fc) && !use_minrate) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			rcs[0].rix =
				ath_tx_findindex(rt, tx_info_priv->min_rate);
			/* mcast packets are not re-tried */
			rcs[0].tries = 1;
		}
	} else {
		/* for management and control frames,
		   or for NULL and EAPOL frames */
		if (use_minrate)
			rcs[0].rix = ath_rate_findrateix(sc, tx_info_priv->min_rate);
		else
			rcs[0].rix = 0;
		rcs[0].tries = ATH_MGT_TXMAXTRY;
	}

	rix = rcs[0].rix;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
		/* reset tries but keep rate index */
		rcs[0].tries = ATH_TXMAXTRY;
	}
}

/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !check_min_rate(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * IEEE80211 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->sc_txbuflock);

	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->sc_txbuflock);

	return bf;
}

/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_bh(&sc->sc_txbuflock);
	list_splice_tail_init(bf_q, &sc->sc_txbuf);
	spin_unlock_bh(&sc->sc_txbuflock);
}

/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}

/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
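
/*
 * NB: tid->tx_buf[] is a circular window of ATH_TID_MAX_BUFS slots
 * (a power of two, hence the mask above). ath_tx_update_baw() clears
 * the completed slot and then advances baw_head/seq_start across any
 * leading completed slots, sliding the block-ack window forward.
 */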

/*
 * ath_pkt_duration - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - whether to use 4 us or 3.6 us for symbol time
 */

static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rt->info[rix].rateCode;

	/*
	 * for legacy rates, use old function to compute packet duration
	 */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rt, pktlen, rix,
					      shortPreamble);
	/*
	 * find number of symbols: PLCP + data
	 */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/*
	 * add up duration for legacy/ht training and signal fields
	 */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
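
/*
 * Example: a 1500-byte frame at MCS 7, 20 MHz, full GI:
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, nsymbols = 47,
 * duration = 47 * 4 us + 36 us of training/signal fields
 * (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1)) = 224 us.
 */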

/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	int i, flags, rtsctsena = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta)
		an = (struct ath_node *)tx_info->control.sta->drv_priv;

	/*
	 * get the cix for the lowest valid rix.
	 */
	rt = sc->sc_currates;
	for (i = 4; i--;) {
		if (bf->bf_rcs[i].tries) {
			rix = bf->bf_rcs[i].rix;
			break;
		}
	}
	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].controlRate;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE &&
	    (rt->info[rix].phy == PHY_OFDM ||
	     rt->info[rix].phy == PHY_HT) &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for
	 * hw retried frames. We enable the global flag here and
	 * let rate series flags determine which rates will actually
	 * use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/*
		 * 802.11g protection not needed, use our default behavior
		 */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/*
	 * Set protection if aggregate protection on
	 */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/*
	 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
	 */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit)) {
		/*
		 * Ensure that in the case of SM Dynamic power save
		 * while we are bursting the second aggregate the
		 * RTS is cleared.
		 */
		flags &= ~(ATH9K_TXDESC_RTSENA);
	}

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 * NB: cix is set above where RTS/CTS is enabled
	 */
	BUG_ON(cix == 0xff);
	ctsrate = rt->info[cix].rateCode |
		(bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);

	/*
	 * Setup HAL rate series
	 */
	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	for (i = 0; i < 4; i++) {
		if (!bf->bf_rcs[i].tries)
			continue;

		rix = bf->bf_rcs[i].rix;

		series[i].Rate = rt->info[rix].rateCode |
			(bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);

		series[i].Tries = bf->bf_rcs[i].tries;

		series[i].RateFlags = (
			(bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
				ATH9K_RATESERIES_2040 : 0) |
			((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
			 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
			 bf_isshpreamble(bf));

		if (bf_isht(bf) && an)
			series[i].ChSel = ath_chainmask_sel_logic(sc, an);
		else
			series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/*
	 * For non-HT devices, calculate RTS/CTS duration in software
	 * and disable multi-rate retry.
	 */
	if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
		/*
		 * Compute the transmit duration based on the frame
		 * size and the size of an ACK frame. We call into the
		 * HAL to do the computation since it depends on the
		 * characteristics of the actual PHY being used.
		 *
		 * NB: CTS is assumed the same size as an ACK so we can
		 * use the precalculated ACK durations.
		 */
		if (flags & ATH9K_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += bf_isshpreamble(bf) ?
				rt->info[cix].spAckDuration :
				rt->info[cix].lpAckDuration;
		}

		ctsduration += series[0].PktDuration;

		if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
			ctsduration += bf_isshpreamble(bf) ?
				rt->info[rix].spAckDuration :
				rt->info[rix].lpAckDuration;
		}

		/*
		 * Disable multi-rate retry when using RTS/CTS by clearing
		 * series 1, 2 and 3.
		 */
		memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
	}

	/*
	 * set dur_update_en for l-sig computation except for PS-Poll frames
	 */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
				     !bf_ispspoll(bf),
				     ctsrate,
				     ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
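
/*
 * NB: the four-entry series[] implements multi-rate retry: the hardware
 * makes series[i].Tries attempts at series[i].Rate before falling
 * through to the next series, so series[0] should carry the rate
 * control module's first choice.
 */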

/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */

static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	/* XXX: HACK! */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}

/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
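
/*
 * On A-MPDU completion, each subframe is matched against the block-ack
 * bitmap reported by the hardware: acked subframes are completed and
 * slide the BAW forward, while un-acked ones are software-retried (up
 * to ATH_MAX_SW_RETRIES) by prepending them to the TID queue so that
 * ordering is preserved.
 */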

/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when a BA
				 * issue happens. Chip needs to be reset.
				 * But AP code may have synchronization issues
				 * when performing an internal reset in this
				 * routine. Only enable reset in STA mode for now.
				 */
				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {

			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) {  /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */

			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
					struct ath_buf, list);
				list_cut_position(&bf_head,
					bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe only has one
				 * descriptor which is also being used as
				 * a holding descriptor, then the ath_buf
				 * is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
				bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is a holding descriptor,
			 * in order to requeue the frame to software queue, we
			 * need to allocate a new descriptor and
			 * copy the content of holding descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->sc_txbuflock);
				ASSERT(!list_empty((&sc->sc_txbuf)));
				tbf = list_first_entry(&sc->sc_txbuf,
					struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->sc_txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we do _not_ add to
		 * the end of the queue! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);

	return;
}

/* Process completed xmit descriptors from the specified queue */

static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds, *tmp_ds;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	int nacked, txok, nbad = 0, isrifs = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	nacked = 0;
	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Let's work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;	/* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);

		/* XXX: HACK! */
		tx_info_priv = (struct ath_tx_info_priv *) tx_info->control.vif;
		if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
			tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
		    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
			if (ds->ds_txstat.ts_status == 0)
				nacked++;

			if (bf_isdata(bf)) {
				if (isrifs)
					tmp_ds = bf->bf_rifslast->bf_desc;
				else
					tmp_ds = ds;
				memcpy(&tx_info_priv->tx,
				       &tmp_ds->ds_txstat,
				       sizeof(tx_info_priv->tx));
				tx_info_priv->n_frames = bf->bf_nframes;
				tx_info_priv->n_bad_frames = nbad;
			}
		}

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
	return nacked;
}

static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
		__func__, txq->axq_qnum,
		ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset hardware; hal status %u\n",
				__func__,
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}

/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
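
/*
 * NB: ATH_BA_INDEX() is the subframe's sequence-number offset from
 * tid->seq_start; adding it to baw_head (mod ATH_TID_MAX_BUFS) maps
 * each outstanding seqno to a fixed slot until the window slides past
 * it in ath_tx_update_baw().
 */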

/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	/* XXX: HACK! */
	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}

/*
 * Look up the rate; return the aggregate limit based on
 * the lowest of the rates.
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	tx_info_priv = (struct ath_tx_info_priv *)
		tx_info->control.vif; /* XXX: HACK! */
	memcpy(bf->bf_rcs,
	       tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (bf->bf_rcs[i].tries) {
			frame_length = bf->bf_rcs[i].max_4ms_framelen;

			if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
				legacy = 1;
				break;
			}

			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the rate selected is
	 * not a probe rate; if the rate selected is a probe rate then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}
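
/*
 * Rough numbers: at MCS 7 / 20 MHz (65 Mbps), 4 ms of airtime is about
 * 32500 bytes, so max_4ms_framelen caps the aggregate near 32 KB even
 * before the ATH_AMPDU_LIMIT_DEFAULT and peer maxampdu clamps apply.
 */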

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * Caller should make sure that the rate is an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *        The hardware can keep up at lower rates, but not higher rates
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = bf->bf_rcs[0].rix;
	flags = bf->bf_rcs[0].flags;
	rc = rt->info[rix].rateCode;
	width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
	half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
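
/*
 * Example, assuming the standard 4-byte MPDU delimiter: with an 8 us
 * mpdudensity at MCS 7 / 20 MHz / full GI, nsymbols = 8 / 4 = 2 and
 * minlen = 2 * 260 / 8 = 65 bytes, so a 40-byte subframe needs
 * mindelim = (65 - 40) / 4 = 6 delimiters of padding.
 */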

/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0, is_ds_rate = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
			/*
			 * Is rate dual stream
			 */
			is_ds_rate =
				(bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}
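
/*
 * NB: on air each subframe is padded to a 4-byte boundary (PADBYTES,
 * e.g. PADBYTES(1538) = 2) plus ndelim 4-byte delimiters; 'al'
 * accumulates this on-air length and ends up in bf_al for the 8K RTS
 * limit and burst-duration checks in ath_buf_set_rate().
 */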

/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */

static void ath_tx_sched_aggr(struct ath_softc *sc,
			      struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
				struct sk_buff *skb, struct scatterlist *sg,
				struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	struct ath_rc_series *rcs;
	int hdrlen;
	__le16 fc;

	tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	rcs = tx_info_priv->rcs;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);

	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Rate series */

	setup_rate_retries(sc, skb);

	bf->bf_rcs[0] = rcs[0];
	bf->bf_rcs[1] = rcs[1];
	bf->bf_rcs[2] = rcs[2];
	bf->bf_rcs[3] = rcs[3];

	/* Assign seqno, tidno */

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;
	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	bf->bf_buf_addr = bf->bf_dmacontext;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct scatterlist *sg, u32 n_sg,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    sg_dma_len(sg),		/* segment length */
			    true,			/* first segment */
			    (n_sg == 1) ? true : false,	/* last segment */
			    ds);			/* first descriptor */

	bf->bf_lastfrm = bf;

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) {
		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}

int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	struct scatterlist sg;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
			__func__);
		return -1;
	}

	ath_tx_setup_buffer(sc, bf, skb, &sg, txctl);

	/* Setup S/G */

	memset(&sg, 0, sizeof(struct scatterlist));
	sg_dma_address(&sg) = bf->bf_dmacontext;
	sg_dma_len(&sg) = skb->len;

	ath_tx_start_dma(sc, bf, &sg, 1, txctl);

	return 0;
}
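
/*
 * Illustrative use (a sketch, not code from this driver): callers are
 * expected to free the skb themselves when ath_tx_start() fails, as
 * ath_tx_cabq() below does:
 *
 *	if (ath_tx_start(sc, skb, &txctl) != 0)
 *		dev_kfree_skb_any(skb);
 */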

/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->sc_txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate tx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate "
				"beacon descriptors: %d\n",
				__func__, error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	return 0;
}

/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts, this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
				TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: hal qnum %u out of range, max %u!\n",
			__func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
}

/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues, in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: HAL AC %u out of range, max %zu!\n",
			__func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_haltype2q[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
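
/*
 * Illustrative call (a sketch; the driver's attach code lives
 * elsewhere): one queue is typically set up per WME access category,
 * e.g.
 *
 *	ath_tx_setup(sc, ATH9K_WME_AC_BE);
 *
 * after which sc_haltype2q[ATH9K_WME_AC_BE] holds the h/w queue number.
 */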

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: HAL AC %u out of range, max %zu!\n",
				__func__,
				haltype, ARRAY_SIZE(sc->sc_haltype2q));
			return -1;
		}
		qnum = sc->sc_haltype2q[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->sc_bhalq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->sc_cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}
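
/*
 * For example, the per-node init code below resolves its BE data
 * queue with:
 *
 *	qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
 */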

/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->sc_txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: TX queue: %d is full, depth: %d\n",
			__func__, qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
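
/*
 * NB: the (ATH_TXBUF - 20) threshold stops the mac80211 queue a little
 * before the buffer pool is actually exhausted; the margin of 20
 * appears intended to leave headroom for frames already being set up,
 * so ath_tx_get_buffer() should not fail in the normal path.
 */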

/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_bhalq) {
		/*
		 * XXX: for the beacon queue, we just save the parameters.
		 * They will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to update hardware queue %u!\n",
			__func__, qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);	/* push to h/w */
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->sc_cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
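	/*
	 * cabqReadytime is a percentage of the beacon interval: with
	 * cabqReadytime set to 10, for instance, the CAB queue is given
	 * a ready-time window of 10% of each beacon interval.
	 */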
	ath_txq_update(sc, qnum, &qi);

	return 0;
}

/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->sc_txq[i]);
	}
}
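
/*
 * Drain one h/w queue: reap every ath_buf chain on it and complete the
 * frames. Unless retry_tx is set, the last descriptor of each chain is
 * first marked ATH9K_TX_SW_ABORTED, presumably so the completion path
 * reports the frames as dropped instead of scheduling software retries
 * while the queue is being torn down.
 */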
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 * we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
			continue;
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}

/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
		DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
	}

	ath_drain_txdataq(sc, retry_tx);
}

u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_aggr_depth;
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return true;
		}
	}

	return false;
}
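
/*
 * NB: ath_tx_aggr_check() doubles as a rate limiter for ADDBA setup:
 * it returns true (and counts one more attempt) only while no ADDBA
 * exchange is complete or in progress and fewer than
 * ADDBA_EXCHANGE_ATTEMPTS tries have been made for the TID.
 */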

/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->state |= AGGR_ADDBA_PROGRESS;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}

/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);
	return 0;
}

/* Resume tx aggregation */

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
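
/*
 * The block-ack window is sized from the peer's A-MPDU factor:
 * IEEE80211_MIN_AMPDU_BUF is 8, so e.g. ampdu_factor == 3 yields
 * baw_size = 8 << 3 = 64, the largest 802.11n reorder window.
 */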

/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame always stays at the
			 * head of the software queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
				  &txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}

/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)	/* check next tid to keep h/w busy */
			continue;

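		/*
		 * NB: aggregates are only formed while the h/w queue
		 * depth is even, which looks like a simple throttle to
		 * avoid committing too many aggregates to the hardware
		 * at once; see ath_tx_sched_aggr().
		 */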
		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}

/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size = WME_MAX_BA;
		tid->baw_head = tid->baw_tail = 0;
		tid->sched = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->an_aggr.tx.ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
					ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
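
/*
 * TID_TO_WME_AC() applies the standard 802.11 UP-to-AC mapping (e.g.
 * TIDs 1 and 2 map to WME_AC_BK, TIDs 6 and 7 to WME_AC_VO), so every
 * TID's traffic lands on one of the four data queues set up above.
 */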

/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}

void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
	}
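	/*
	 * seq_ctrl keeps the fragment number in its low 4 bits and the
	 * sequence number in bits 4-15, so adding 0x10 bumps the
	 * sequence number by one while the IEEE80211_SCTL_FRAG mask
	 * preserves the fragment field.
	 */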

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
				"failed\n", __func__);
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}
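	/*
	 * The header is moved up so that the payload starts on a 4-byte
	 * boundary, leaving the pad bytes between header and body; e.g.
	 * a 26-byte QoS data header yields padsize == 2.
	 */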

	txctl.txq = sc->sc_cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
		__func__,
		skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}