mac80211/drivers: rewrite the rate control API
drivers/net/wireless/ath9k/xmit.c
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Implementation of transmit path.
19 */
20
21#include "core.h"
22
23#define BITS_PER_BYTE 8
24#define OFDM_PLCP_BITS 22
25#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
26#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
27#define L_STF 8
28#define L_LTF 8
29#define L_SIG 4
30#define HT_SIG 8
31#define HT_STF 4
32#define HT_LTF(_ns) (4 * (_ns))
33#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
34#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
35#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
36#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
37
38#define OFDM_SIFS_TIME 16
39
40static u32 bits_per_symbol[][2] = {
41 /* 20MHz 40MHz */
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
58};
59
60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61
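/*
 * Illustrative note (added comment, not from the original source): the
 * table above, combined with the SYMBOL_TIME macros, reproduces the
 * familiar 802.11n PHY rates. For example, MCS 7 at 20 MHz with the
 * long guard interval carries bits_per_symbol[7][0] = 260 bits per
 * 4 us OFDM symbol, i.e. 260 / 4 = 65 Mbps; with the half GI the
 * symbol lasts 3.6 us, giving roughly 72.2 Mbps. Entries 8-15 are the
 * two-stream rates, hence the doubled bit counts.
 */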
62/*
63 * Insert a chain of ath_buf (descriptors) on a txq and
 64 * assume the descriptors are already chained together by the caller.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_txqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq, struct list_head *head)
70{
71 struct ath_hal *ah = sc->sc_ah;
72 struct ath_buf *bf;
73 /*
74 * Insert the frame on the outbound list and
75 * pass it on to the hardware.
76 */
77
78 if (list_empty(head))
79 return;
80
81 bf = list_first_entry(head, struct ath_buf, list);
82
83 list_splice_tail_init(head, &txq->axq_q);
84 txq->axq_depth++;
85 txq->axq_totalqueued++;
86 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
87
88 DPRINTF(sc, ATH_DBG_QUEUE,
89 "%s: txq depth = %d\n", __func__, txq->axq_depth);
90
91 if (txq->axq_link == NULL) {
92 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
93 DPRINTF(sc, ATH_DBG_XMIT,
94 "%s: TXDP[%u] = %llx (%p)\n",
95 __func__, txq->axq_qnum,
96 ito64(bf->bf_daddr), bf->bf_desc);
97 } else {
98 *txq->axq_link = bf->bf_daddr;
99 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
100 __func__,
101 txq->axq_qnum, txq->axq_link,
102 ito64(bf->bf_daddr), bf->bf_desc);
103 }
104 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
105 ath9k_hw_txstart(ah, txq->axq_qnum);
106}
107
108/* Get transmit rate index using rate in Kbps */
109
110static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
111{
112 int i;
113 int ndx = 0;
114
115 for (i = 0; i < rt->rateCount; i++) {
116 if (rt->info[i].rateKbps == rate) {
117 ndx = i;
118 break;
119 }
120 }
121
122 return ndx;
123}
124
125/* Check if it's okay to send out aggregates */
126
127static int ath_aggr_query(struct ath_softc *sc,
128 struct ath_node *an, u8 tidno)
129{
130 struct ath_atx_tid *tid;
131 tid = ATH_AN_2_TID(an, tidno);
132
133 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
134 return 1;
135 else
136 return 0;
137}
138
139static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
140{
141 enum ath9k_pkt_type htype;
142 __le16 fc;
143
144 fc = hdr->frame_control;
145
146 /* Calculate Atheros packet type from IEEE80211 packet header */
147
148 if (ieee80211_is_beacon(fc))
149 htype = ATH9K_PKT_TYPE_BEACON;
150 else if (ieee80211_is_probe_resp(fc))
151 htype = ATH9K_PKT_TYPE_PROBE_RESP;
152 else if (ieee80211_is_atim(fc))
153 htype = ATH9K_PKT_TYPE_ATIM;
154 else if (ieee80211_is_pspoll(fc))
155 htype = ATH9K_PKT_TYPE_PSPOLL;
156 else
157 htype = ATH9K_PKT_TYPE_NORMAL;
158
159 return htype;
160}
161
162static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
163{
164 struct ieee80211_hdr *hdr;
165 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
166 struct ath_tx_info_priv *tx_info_priv;
167 __le16 fc;
168
169 hdr = (struct ieee80211_hdr *)skb->data;
170 fc = hdr->frame_control;
171
172 /* XXX: HACK! */
173 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
174
175 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
176 txctl->use_minrate = 1;
177 txctl->min_rate = tx_info_priv->min_rate;
178 } else if (ieee80211_is_data(fc)) {
179 if (ieee80211_is_nullfunc(fc) ||
180 /* Port Access Entity (IEEE 802.1X) */
181 (skb->protocol == cpu_to_be16(0x888E))) {
182 txctl->use_minrate = 1;
183 txctl->min_rate = tx_info_priv->min_rate;
184 }
185 if (is_multicast_ether_addr(hdr->addr1))
186 txctl->mcast_rate = tx_info_priv->min_rate;
187 }
188
189}
190
191/* This function will setup additional txctl information, mostly rate stuff */
192/* FIXME: seqno, ps */
193static int ath_tx_prepare(struct ath_softc *sc,
194 struct sk_buff *skb,
195 struct ath_tx_control *txctl)
196{
197 struct ieee80211_hw *hw = sc->hw;
198 struct ieee80211_hdr *hdr;
199 struct ath_rc_series *rcs;
200 struct ath_txq *txq = NULL;
201 const struct ath9k_rate_table *rt;
202 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
203 struct ath_tx_info_priv *tx_info_priv;
204 int hdrlen;
205 u8 rix, antenna;
206 __le16 fc;
207 u8 *qc;
208
209 txctl->dev = sc;
210 hdr = (struct ieee80211_hdr *)skb->data;
211 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
212 fc = hdr->frame_control;
213
214 rt = sc->sc_currates;
215 BUG_ON(!rt);
216
217 /* Fill misc fields */
218
219 spin_lock_bh(&sc->node_lock);
220 txctl->an = ath_node_get(sc, hdr->addr1);
221 /* create a temp node, if the node is not there already */
222 if (!txctl->an)
223 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
224 spin_unlock_bh(&sc->node_lock);
225
226 if (ieee80211_is_data_qos(fc)) {
227 qc = ieee80211_get_qos_ctl(hdr);
228 txctl->tidno = qc[0] & 0xf;
229 }
230
231 txctl->if_id = 0;
232 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
233 txctl->txpower = MAX_RATE_POWER; /* FIXME */
234
235 /* Fill Key related fields */
236
237 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
238 txctl->keyix = ATH9K_TXKEYIX_INVALID;
239
240 if (tx_info->control.hw_key) {
241 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
 242 txctl->frmlen += tx_info->control.hw_key->icv_len;
 243
 244 if (tx_info->control.hw_key->alg == ALG_WEP)
 245 txctl->keytype = ATH9K_KEY_TYPE_WEP;
 246 else if (tx_info->control.hw_key->alg == ALG_TKIP)
 247 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
 248 else if (tx_info->control.hw_key->alg == ALG_CCMP)
249 txctl->keytype = ATH9K_KEY_TYPE_AES;
250 }
251
252 /* Fill packet type */
253
254 txctl->atype = get_hal_packet_type(hdr);
255
256 /* Fill qnum */
257
258 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
259 txctl->qnum = 0;
260 txq = sc->sc_cabq;
261 } else {
262 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
263 txq = &sc->sc_txq[txctl->qnum];
264 }
265 spin_lock_bh(&txq->axq_lock);
266
267 /* Try to avoid running out of descriptors */
268 if (txq->axq_depth >= (ATH_TXBUF - 20) &&
269 !(txctl->flags & ATH9K_TXDESC_CAB)) {
270 DPRINTF(sc, ATH_DBG_FATAL,
271 "%s: TX queue: %d is full, depth: %d\n",
272 __func__,
273 txctl->qnum,
274 txq->axq_depth);
275 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
276 txq->stopped = 1;
277 spin_unlock_bh(&txq->axq_lock);
278 return -1;
279 }
280
281 spin_unlock_bh(&txq->axq_lock);
282
283 /* Fill rate */
284
285 fill_min_rates(skb, txctl);
286
287 /* Fill flags */
288
 289 txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
290
291 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 292 txctl->flags |= ATH9K_TXDESC_NOACK;
293
294 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
 295 txctl->flags |= ATH9K_TXDESC_RTSENA;
296
297 /*
298 * Setup for rate calculations.
299 */
300
301 /* XXX: HACK! */
302 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
303 rcs = tx_info_priv->rcs;
304
305 if (ieee80211_is_data(fc) && !txctl->use_minrate) {
306
307 /* Enable HT only for DATA frames and not for EAPOL */
308 /* XXX why AMPDU only?? */
309 txctl->ht = (hw->conf.ht.enabled &&
310 (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
311
312 if (is_multicast_ether_addr(hdr->addr1)) {
313 rcs[0].rix = (u8)
314 ath_tx_findindex(rt, txctl->mcast_rate);
315
316 /*
317 * mcast packets are not re-tried.
318 */
319 rcs[0].tries = 1;
320 }
321 /* For HT capable stations, we save tidno for later use.
322 * We also override seqno set by upper layer with the one
323 * in tx aggregation state.
324 *
 325 * First, the fragmentation state is determined.
326 * If fragmentation is on, the sequence number is
327 * not overridden, since it has been
328 * incremented by the fragmentation routine.
329 */
330 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
 331 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
332 struct ath_atx_tid *tid;
333
334 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
335
336 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
337 IEEE80211_SEQ_SEQ_SHIFT);
338 txctl->seqno = tid->seq_next;
339 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
340 }
341 } else {
342 /* for management and control frames,
343 * or for NULL and EAPOL frames */
344 if (txctl->min_rate)
345 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
346 else
 347 rcs[0].rix = 0;
348 rcs[0].tries = ATH_MGT_TXMAXTRY;
349 }
350 rix = rcs[0].rix;
351
352 if (ieee80211_has_morefrags(fc) ||
353 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
 354 /*
355 ** Force hardware to use computed duration for next
356 ** fragment by disabling multi-rate retry, which
357 ** updates duration based on the multi-rate
358 ** duration table.
359 */
360 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
361 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
362 /* reset tries but keep rate index */
363 rcs[0].tries = ATH_TXMAXTRY;
364 }
365
366 /*
367 * Determine if a tx interrupt should be generated for
368 * this descriptor. We take a tx interrupt to reap
369 * descriptors when the h/w hits an EOL condition or
370 * when the descriptor is specifically marked to generate
371 * an interrupt. We periodically mark descriptors in this
 372 * way to ensure timely replenishing of the supply needed
 373 * for sending frames. Deferring interrupts reduces system
 374 * load and potentially allows more concurrent work to be
 375 * done, but if done too aggressively it can cause senders to
 376 * back up.
377 *
378 * NB: use >= to deal with sc_txintrperiod changing
379 * dynamically through sysctl.
380 */
381 spin_lock_bh(&txq->axq_lock);
382 if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
383 txctl->flags |= ATH9K_TXDESC_INTREQ;
384 txq->axq_intrcnt = 0;
385 }
386 spin_unlock_bh(&txq->axq_lock);
387
388 if (is_multicast_ether_addr(hdr->addr1)) {
389 antenna = sc->sc_mcastantenna + 1;
390 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
 391 }
 392
393 return 0;
394}
395
396/* To complete a chain of buffers associated a frame */
397
398static void ath_tx_complete_buf(struct ath_softc *sc,
399 struct ath_buf *bf,
400 struct list_head *bf_q,
401 int txok, int sendbar)
402{
403 struct sk_buff *skb = bf->bf_mpdu;
404 struct ath_xmit_status tx_status;
405
406 /*
407 * Set retry information.
408 * NB: Don't use the information in the descriptor, because the frame
409 * could be software retried.
410 */
411 tx_status.retries = bf->bf_retries;
412 tx_status.flags = 0;
413
414 if (sendbar)
415 tx_status.flags = ATH_TX_BAR;
416
417 if (!txok) {
418 tx_status.flags |= ATH_TX_ERROR;
419
 420 if (bf_isxretried(bf))
421 tx_status.flags |= ATH_TX_XRETRY;
422 }
423 /* Unmap this frame */
 424 pci_unmap_single(sc->pdev,
 425 bf->bf_dmacontext,
426 skb->len,
427 PCI_DMA_TODEVICE);
428 /* complete this frame */
429 ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
430
431 /*
432 * Return the list of ath_buf of this mpdu to free queue
433 */
434 spin_lock_bh(&sc->sc_txbuflock);
435 list_splice_tail_init(bf_q, &sc->sc_txbuf);
436 spin_unlock_bh(&sc->sc_txbuflock);
437}
438
439/*
440 * queue up a dest/ac pair for tx scheduling
441 * NB: must be called with txq lock held
442 */
443
444static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
445{
446 struct ath_atx_ac *ac = tid->ac;
447
448 /*
449 * if tid is paused, hold off
450 */
451 if (tid->paused)
452 return;
453
454 /*
 455 * add tid to ac at most once
456 */
457 if (tid->sched)
458 return;
459
460 tid->sched = true;
461 list_add_tail(&tid->list, &ac->tid_q);
462
463 /*
 464 * add node ac to txq at most once
465 */
466 if (ac->sched)
467 return;
468
469 ac->sched = true;
470 list_add_tail(&ac->list, &txq->axq_acq);
471}
472
473/* pause a tid */
474
475static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
476{
477 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
478
479 spin_lock_bh(&txq->axq_lock);
480
481 tid->paused++;
482
483 spin_unlock_bh(&txq->axq_lock);
484}
485
486/* resume a tid and schedule aggregate */
487
488void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
489{
490 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
491
492 ASSERT(tid->paused > 0);
493 spin_lock_bh(&txq->axq_lock);
494
495 tid->paused--;
496
497 if (tid->paused > 0)
498 goto unlock;
499
500 if (list_empty(&tid->buf_q))
501 goto unlock;
502
503 /*
504 * Add this TID to scheduler and try to send out aggregates
505 */
506 ath_tx_queue_tid(txq, tid);
507 ath_txq_schedule(sc, txq);
508unlock:
509 spin_unlock_bh(&txq->axq_lock);
510}
511
512/* Compute the number of bad frames */
513
514static int ath_tx_num_badfrms(struct ath_softc *sc,
515 struct ath_buf *bf, int txok)
516{
517 struct ath_node *an = bf->bf_node;
518 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
519 struct ath_buf *bf_last = bf->bf_lastbf;
520 struct ath_desc *ds = bf_last->bf_desc;
521 u16 seq_st = 0;
522 u32 ba[WME_BA_BMP_SIZE >> 5];
523 int ba_index;
524 int nbad = 0;
525 int isaggr = 0;
526
527 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
528 return 0;
529
 530 isaggr = bf_isaggr(bf);
531 if (isaggr) {
532 seq_st = ATH_DS_BA_SEQ(ds);
533 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
534 }
535
536 while (bf) {
537 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
538 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
539 nbad++;
540
541 bf = bf->bf_next;
542 }
543
544 return nbad;
545}
546
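/*
 * Illustrative note (added comment, not from the original source):
 * assuming ATH_BA_INDEX() returns the offset of a subframe's sequence
 * number from the block-ack starting sequence (modulo the 12-bit
 * sequence space), an aggregate whose BA reports seq_st = 100 and
 * whose subframes carry seqnos 100..103 is checked bit by bit:
 * seqno 103 maps to index 3, and ATH_BA_ISSET(ba, 3) tells whether
 * that subframe was acknowledged. Unset bits are counted as bad
 * frames above and are later retried or dropped.
 */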
547static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
548{
549 struct sk_buff *skb;
550 struct ieee80211_hdr *hdr;
551
 552 bf->bf_state.bf_type |= BUF_RETRY;
553 bf->bf_retries++;
554
555 skb = bf->bf_mpdu;
556 hdr = (struct ieee80211_hdr *)skb->data;
557 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
558}
559
560/* Update block ack window */
561
562static void ath_tx_update_baw(struct ath_softc *sc,
563 struct ath_atx_tid *tid, int seqno)
564{
565 int index, cindex;
566
567 index = ATH_BA_INDEX(tid->seq_start, seqno);
568 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
569
570 tid->tx_buf[cindex] = NULL;
571
572 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
573 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
574 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
575 }
576}
577
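/*
 * Illustrative example (added comment, not from the original source):
 * suppose tid->seq_start = 200 and tid->baw_head points at slot 5.
 * When the subframe with seqno 200 completes, index = 0, cindex = 5,
 * and slot 5 is cleared; since the head slot is now empty, seq_start
 * advances to 201 and baw_head to slot 6. If seqno 201 had already
 * completed out of order (slot 6 empty as well), the window keeps
 * sliding until it reaches the first still-outstanding subframe.
 */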
578/*
579 * ath_pkt_dur - compute packet duration (NB: not NAV)
580 *
581 * rix - rate index
582 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
583 * width - 0 for 20 MHz, 1 for 40 MHz
584 * half_gi - to use 4us v/s 3.6 us for symbol time
585 */
586
587static u32 ath_pkt_duration(struct ath_softc *sc,
588 u8 rix,
589 struct ath_buf *bf,
590 int width,
591 int half_gi,
592 bool shortPreamble)
593{
594 const struct ath9k_rate_table *rt = sc->sc_currates;
595 u32 nbits, nsymbits, duration, nsymbols;
596 u8 rc;
597 int streams, pktlen;
598
 599 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
600 rc = rt->info[rix].rateCode;
601
602 /*
603 * for legacy rates, use old function to compute packet duration
604 */
605 if (!IS_HT_RATE(rc))
606 return ath9k_hw_computetxtime(sc->sc_ah,
607 rt,
608 pktlen,
609 rix,
610 shortPreamble);
611 /*
612 * find number of symbols: PLCP + data
613 */
614 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
615 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
616 nsymbols = (nbits + nsymbits - 1) / nsymbits;
617
618 if (!half_gi)
619 duration = SYMBOL_TIME(nsymbols);
620 else
621 duration = SYMBOL_TIME_HALFGI(nsymbols);
622
623 /*
 624 * add up duration for legacy/ht training and signal fields
625 */
626 streams = HT_RC_2_STREAMS(rc);
627 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
628 return duration;
629}
630
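/*
 * Illustrative example (added comment, not from the original source):
 * for a 1500-byte MPDU at MCS 7, 20 MHz, single stream, long GI:
 * nbits = 1500 * 8 + 22 = 12022, nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 and the data portion lasts
 * SYMBOL_TIME(47) = 188 us. Adding the legacy and HT preamble fields
 * (8 + 8 + 4 + 8 + 4 + 4 us for one HT-LTF) gives about 224 us on
 * the air.
 */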
631/* Rate module function to set rate related fields in tx descriptor */
632
633static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
634{
635 struct ath_hal *ah = sc->sc_ah;
636 const struct ath9k_rate_table *rt;
637 struct ath_desc *ds = bf->bf_desc;
638 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
639 struct ath9k_11n_rate_series series[4];
640 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
641 u32 ctsduration = 0;
642 u8 rix = 0, cix, ctsrate = 0;
 643 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
644 struct ath_node *an = (struct ath_node *) bf->bf_node;
645
646 /*
647 * get the cix for the lowest valid rix.
648 */
649 rt = sc->sc_currates;
650 for (i = 4; i--;) {
651 if (bf->bf_rcs[i].tries) {
652 rix = bf->bf_rcs[i].rix;
653 break;
654 }
655 }
656 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
657 cix = rt->info[rix].controlRate;
658
659 /*
660 * If 802.11g protection is enabled, determine whether
661 * to use RTS/CTS or just CTS. Note that this is only
662 * done for OFDM/HT unicast frames.
663 */
664 if (sc->sc_protmode != PROT_M_NONE &&
665 (rt->info[rix].phy == PHY_OFDM ||
666 rt->info[rix].phy == PHY_HT) &&
667 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
668 if (sc->sc_protmode == PROT_M_RTSCTS)
669 flags = ATH9K_TXDESC_RTSENA;
670 else if (sc->sc_protmode == PROT_M_CTSONLY)
671 flags = ATH9K_TXDESC_CTSENA;
672
673 cix = rt->info[sc->sc_protrix].controlRate;
674 rtsctsena = 1;
675 }
676
677 /* For 11n, the default behavior is to enable RTS for
678 * hw retried frames. We enable the global flag here and
679 * let rate series flags determine which rates will actually
680 * use RTS.
681 */
 682 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
683 BUG_ON(!an);
684 /*
685 * 802.11g protection not needed, use our default behavior
686 */
687 if (!rtsctsena)
688 flags = ATH9K_TXDESC_RTSENA;
689 /*
690 * For dynamic MIMO PS, RTS needs to precede the first aggregate
 691 * and the second aggregate should not have any protection at all.
692 */
693 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
 694 if (!bf_isaggrburst(bf)) {
695 flags = ATH9K_TXDESC_RTSENA;
696 dynamic_mimops = 1;
697 } else {
698 flags = 0;
699 }
700 }
701 }
702
703 /*
704 * Set protection if aggregate protection on
705 */
706 if (sc->sc_config.ath_aggr_prot &&
 707 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
708 flags = ATH9K_TXDESC_RTSENA;
709 cix = rt->info[sc->sc_protrix].controlRate;
710 rtsctsena = 1;
711 }
712
713 /*
714 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
715 */
 716 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
717 /*
718 * Ensure that in the case of SM Dynamic power save
719 * while we are bursting the second aggregate the
720 * RTS is cleared.
721 */
722 flags &= ~(ATH9K_TXDESC_RTSENA);
723 }
724
725 /*
726 * CTS transmit rate is derived from the transmit rate
727 * by looking in the h/w rate table. We must also factor
728 * in whether or not a short preamble is to be used.
729 */
730 /* NB: cix is set above where RTS/CTS is enabled */
731 BUG_ON(cix == 0xff);
732 ctsrate = rt->info[cix].rateCode |
 733 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
734
735 /*
736 * Setup HAL rate series
737 */
 738 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
739
740 for (i = 0; i < 4; i++) {
741 if (!bf->bf_rcs[i].tries)
742 continue;
743
744 rix = bf->bf_rcs[i].rix;
745
746 series[i].Rate = rt->info[rix].rateCode |
 747 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
748
749 series[i].Tries = bf->bf_rcs[i].tries;
750
751 series[i].RateFlags = (
752 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
753 ATH9K_RATESERIES_RTS_CTS : 0) |
754 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
755 ATH9K_RATESERIES_2040 : 0) |
756 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
757 ATH9K_RATESERIES_HALFGI : 0);
758
759 series[i].PktDuration = ath_pkt_duration(
760 sc, rix, bf,
761 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
762 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
 763 bf_isshpreamble(bf));
764
765 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
766 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
767 /*
768 * When sending to an HT node that has enabled static
769 * SM/MIMO power save, send at single stream rates but
770 * use maximum allowed transmit chains per user,
771 * hardware, regulatory, or country limits for
772 * better range.
773 */
774 series[i].ChSel = sc->sc_tx_chainmask;
775 } else {
 776 if (bf_isht(bf))
777 series[i].ChSel =
778 ath_chainmask_sel_logic(sc, an);
779 else
780 series[i].ChSel = sc->sc_tx_chainmask;
781 }
782
783 if (rtsctsena)
784 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
785
786 /*
787 * Set RTS for all rates if node is in dynamic powersave
788 * mode and we are using dual stream rates.
789 */
790 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
791 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
792 }
793
794 /*
795 * For non-HT devices, calculate RTS/CTS duration in software
796 * and disable multi-rate retry.
797 */
 798 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
799 /*
800 * Compute the transmit duration based on the frame
801 * size and the size of an ACK frame. We call into the
802 * HAL to do the computation since it depends on the
803 * characteristics of the actual PHY being used.
804 *
805 * NB: CTS is assumed the same size as an ACK so we can
806 * use the precalculated ACK durations.
807 */
808 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
 809 ctsduration += bf_isshpreamble(bf) ?
810 rt->info[cix].spAckDuration :
811 rt->info[cix].lpAckDuration;
812 }
813
814 ctsduration += series[0].PktDuration;
815
816 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
 817 ctsduration += bf_isshpreamble(bf) ?
818 rt->info[rix].spAckDuration :
819 rt->info[rix].lpAckDuration;
820 }
821
822 /*
823 * Disable multi-rate retry when using RTS/CTS by clearing
824 * series 1, 2 and 3.
825 */
 826 memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
827 }
828
829 /*
830 * set dur_update_en for l-sig computation except for PS-Poll frames
831 */
832 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
833 !bf_ispspoll(bf),
834 ctsrate,
835 ctsduration,
836 series, 4, flags);
837 if (sc->sc_config.ath_aggr_prot && flags)
838 ath9k_hw_set11n_burstduration(ah, ds, 8192);
839}
840
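/*
 * Summary of the protection decisions made above (added comment, not
 * from the original source): legacy 802.11g protection selects RTS/CTS
 * or CTS-to-self at the configured protection rate; HT data frames
 * default to RTS so that hardware-retried subframes stay protected;
 * dynamic SM power save puts RTS only in front of the first aggregate
 * of a burst; and RTS is stripped again for aggregates larger than the
 * AR5416 limit (rts_aggr_limit, the 8K case noted above), since RTS
 * cannot precede such frames on that hardware.
 */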
841/*
842 * Function to send a normal HT (non-AMPDU) frame
843 * NB: must be called with txq lock held
844 */
845
846static int ath_tx_send_normal(struct ath_softc *sc,
847 struct ath_txq *txq,
848 struct ath_atx_tid *tid,
849 struct list_head *bf_head)
850{
851 struct ath_buf *bf;
852 struct sk_buff *skb;
853 struct ieee80211_tx_info *tx_info;
854 struct ath_tx_info_priv *tx_info_priv;
855
856 BUG_ON(list_empty(bf_head));
857
858 bf = list_first_entry(bf_head, struct ath_buf, list);
 859 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
860
861 skb = (struct sk_buff *)bf->bf_mpdu;
862 tx_info = IEEE80211_SKB_CB(skb);
863
864 /* XXX: HACK! */
865 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
866 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
867
868 /* update starting sequence number for subsequent ADDBA request */
869 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
870
871 /* Queue to h/w without aggregation */
872 bf->bf_nframes = 1;
873 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
874 ath_buf_set_rate(sc, bf);
875 ath_tx_txqaddbuf(sc, txq, bf_head);
876
877 return 0;
878}
879
880/* flush tid's software queue and send frames as non-ampdu's */
881
882static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
883{
884 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
885 struct ath_buf *bf;
886 struct list_head bf_head;
887 INIT_LIST_HEAD(&bf_head);
888
889 ASSERT(tid->paused > 0);
890 spin_lock_bh(&txq->axq_lock);
891
892 tid->paused--;
893
894 if (tid->paused > 0) {
895 spin_unlock_bh(&txq->axq_lock);
896 return;
897 }
898
899 while (!list_empty(&tid->buf_q)) {
900 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
 901 ASSERT(!bf_isretried(bf));
902 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
903 ath_tx_send_normal(sc, txq, tid, &bf_head);
904 }
905
906 spin_unlock_bh(&txq->axq_lock);
907}
908
909/* Completion routine of an aggregate */
910
911static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
912 struct ath_txq *txq,
913 struct ath_buf *bf,
914 struct list_head *bf_q,
915 int txok)
916{
917 struct ath_node *an = bf->bf_node;
918 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
919 struct ath_buf *bf_last = bf->bf_lastbf;
920 struct ath_desc *ds = bf_last->bf_desc;
921 struct ath_buf *bf_next, *bf_lastq = NULL;
922 struct list_head bf_head, bf_pending;
923 u16 seq_st = 0;
924 u32 ba[WME_BA_BMP_SIZE >> 5];
925 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
926 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
927
 928 isaggr = bf_isaggr(bf);
929 if (isaggr) {
930 if (txok) {
931 if (ATH_DS_TX_BA(ds)) {
932 /*
933 * extract starting sequence and
934 * block-ack bitmap
935 */
936 seq_st = ATH_DS_BA_SEQ(ds);
937 memcpy(ba,
938 ATH_DS_BA_BITMAP(ds),
939 WME_BA_BMP_SIZE >> 3);
940 } else {
 941 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
942
943 /*
 944 * The AR5416 can become deaf/mute when a BA
 945 * issue happens. The chip needs to be reset, but
 946 * the AP code may have synchronization issues
 947 * when performing an internal reset in this routine.
948 * Only enable reset in STA mode for now.
949 */
 950 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
951 needreset = 1;
952 }
953 } else {
 954 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
955 }
956 }
957
958 INIT_LIST_HEAD(&bf_pending);
959 INIT_LIST_HEAD(&bf_head);
960
961 while (bf) {
962 txfail = txpending = 0;
963 bf_next = bf->bf_next;
964
965 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
966 /* transmit completion, subframe is
967 * acked by block ack */
968 } else if (!isaggr && txok) {
969 /* transmit completion */
970 } else {
971
972 if (!tid->cleanup_inprogress && !isnodegone &&
973 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
974 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
975 ath_tx_set_retry(sc, bf);
976 txpending = 1;
977 } else {
 978 bf->bf_state.bf_type |= BUF_XRETRY;
979 txfail = 1;
980 sendbar = 1;
981 }
982 } else {
983 /*
984 * cleanup in progress, just fail
985 * the un-acked sub-frames
986 */
987 txfail = 1;
988 }
989 }
990 /*
991 * Remove ath_buf's of this sub-frame from aggregate queue.
992 */
993 if (bf_next == NULL) { /* last subframe in the aggregate */
994 ASSERT(bf->bf_lastfrm == bf_last);
995
996 /*
997 * The last descriptor of the last sub frame could be
998 * a holding descriptor for h/w. If that's the case,
999 * bf->bf_lastfrm won't be in the bf_q.
1000 * Make sure we handle bf_q properly here.
1001 */
1002
1003 if (!list_empty(bf_q)) {
1004 bf_lastq = list_entry(bf_q->prev,
1005 struct ath_buf, list);
1006 list_cut_position(&bf_head,
1007 bf_q, &bf_lastq->list);
1008 } else {
1009 /*
 1010 * XXX: if the last subframe has only one
 1011 * descriptor, which is also being used as
 1012 * a holding descriptor, then the ath_buf
 1013 * is not in the bf_q at all.
1014 */
1015 INIT_LIST_HEAD(&bf_head);
1016 }
1017 } else {
1018 ASSERT(!list_empty(bf_q));
1019 list_cut_position(&bf_head,
1020 bf_q, &bf->bf_lastfrm->list);
1021 }
1022
1023 if (!txpending) {
1024 /*
1025 * complete the acked-ones/xretried ones; update
1026 * block-ack window
1027 */
1028 spin_lock_bh(&txq->axq_lock);
1029 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1030 spin_unlock_bh(&txq->axq_lock);
1031
1032 /* complete this sub-frame */
1033 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1034 } else {
1035 /*
1036 * retry the un-acked ones
1037 */
1038 /*
1039 * XXX: if the last descriptor is holding descriptor,
1040 * in order to requeue the frame to software queue, we
1041 * need to allocate a new descriptor and
1042 * copy the content of holding descriptor to it.
1043 */
1044 if (bf->bf_next == NULL &&
1045 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1046 struct ath_buf *tbf;
1047
1048 /* allocate new descriptor */
1049 spin_lock_bh(&sc->sc_txbuflock);
1050 ASSERT(!list_empty((&sc->sc_txbuf)));
1051 tbf = list_first_entry(&sc->sc_txbuf,
1052 struct ath_buf, list);
1053 list_del(&tbf->list);
1054 spin_unlock_bh(&sc->sc_txbuflock);
1055
1056 ATH_TXBUF_RESET(tbf);
1057
1058 /* copy descriptor content */
1059 tbf->bf_mpdu = bf_last->bf_mpdu;
1060 tbf->bf_node = bf_last->bf_node;
1061 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1062 *(tbf->bf_desc) = *(bf_last->bf_desc);
1063
1064 /* link it to the frame */
1065 if (bf_lastq) {
1066 bf_lastq->bf_desc->ds_link =
1067 tbf->bf_daddr;
1068 bf->bf_lastfrm = tbf;
1069 ath9k_hw_cleartxdesc(sc->sc_ah,
1070 bf->bf_lastfrm->bf_desc);
1071 } else {
1072 tbf->bf_state = bf_last->bf_state;
1073 tbf->bf_lastfrm = tbf;
1074 ath9k_hw_cleartxdesc(sc->sc_ah,
1075 tbf->bf_lastfrm->bf_desc);
1076
1077 /* copy the DMA context */
 1078 tbf->bf_dmacontext =
 1079 bf_last->bf_dmacontext;
1080 }
1081 list_add_tail(&tbf->list, &bf_head);
1082 } else {
1083 /*
1084 * Clear descriptor status words for
1085 * software retry
1086 */
1087 ath9k_hw_cleartxdesc(sc->sc_ah,
 1088 bf->bf_lastfrm->bf_desc);
1089 }
1090
1091 /*
1092 * Put this buffer to the temporary pending
1093 * queue to retain ordering
1094 */
1095 list_splice_tail_init(&bf_head, &bf_pending);
1096 }
1097
1098 bf = bf_next;
1099 }
1100
1101 /*
 1102 * The node is already gone and there is no more association
 1103 * with it. The node might have been freed, so any
 1104 * access to it can result in a panic. Note that the tid
 1105 * is part of the node.
1106 */
1107 if (isnodegone)
1108 return;
1109
1110 if (tid->cleanup_inprogress) {
1111 /* check to see if we're done with cleaning the h/w queue */
1112 spin_lock_bh(&txq->axq_lock);
1113
1114 if (tid->baw_head == tid->baw_tail) {
1115 tid->addba_exchangecomplete = 0;
1116 tid->addba_exchangeattempts = 0;
1117 spin_unlock_bh(&txq->axq_lock);
1118
1119 tid->cleanup_inprogress = false;
1120
1121 /* send buffered frames as singles */
1122 ath_tx_flush_tid(sc, tid);
1123 } else
1124 spin_unlock_bh(&txq->axq_lock);
1125
1126 return;
1127 }
1128
1129 /*
1130 * prepend un-acked frames to the beginning of the pending frame queue
1131 */
1132 if (!list_empty(&bf_pending)) {
1133 spin_lock_bh(&txq->axq_lock);
 1134 /* Note: we _prepend_, we do _not_ add to
 1135 * the end of the queue! */
1136 list_splice(&bf_pending, &tid->buf_q);
1137 ath_tx_queue_tid(txq, tid);
1138 spin_unlock_bh(&txq->axq_lock);
1139 }
1140
1141 if (needreset)
 1142 ath_reset(sc, false);
1143
1144 return;
1145}
1146
1147/* Process completed xmit descriptors from the specified queue */
1148
1149static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1150{
1151 struct ath_hal *ah = sc->sc_ah;
1152 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1153 struct list_head bf_head;
1154 struct ath_desc *ds, *tmp_ds;
1155 struct sk_buff *skb;
1156 struct ieee80211_tx_info *tx_info;
1157 struct ath_tx_info_priv *tx_info_priv;
1158 int nacked, txok, nbad = 0, isrifs = 0;
1159 int status;
1160
1161 DPRINTF(sc, ATH_DBG_QUEUE,
1162 "%s: tx queue %d (%x), link %p\n", __func__,
1163 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1164 txq->axq_link);
1165
1166 nacked = 0;
1167 for (;;) {
1168 spin_lock_bh(&txq->axq_lock);
1169 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
1170 if (list_empty(&txq->axq_q)) {
1171 txq->axq_link = NULL;
1172 txq->axq_linkbuf = NULL;
1173 spin_unlock_bh(&txq->axq_lock);
1174 break;
1175 }
1176 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1177
1178 /*
1179 * There is a race condition that a BH gets scheduled
 1180 * after sw writes TxE and before hw re-loads the last
1181 * descriptor to get the newly chained one.
1182 * Software must keep the last DONE descriptor as a
1183 * holding descriptor - software does so by marking
1184 * it with the STALE flag.
1185 */
1186 bf_held = NULL;
1187 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1188 bf_held = bf;
1189 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1190 /* FIXME:
1191 * The holding descriptor is the last
1192 * descriptor in queue. It's safe to remove
1193 * the last holding descriptor in BH context.
1194 */
1195 spin_unlock_bh(&txq->axq_lock);
1196 break;
1197 } else {
1198 /* Lets work with the next buffer now */
1199 bf = list_entry(bf_held->list.next,
1200 struct ath_buf, list);
1201 }
1202 }
1203
1204 lastbf = bf->bf_lastbf;
 1205 ds = lastbf->bf_desc; /* NB: last descriptor */
1206
1207 status = ath9k_hw_txprocdesc(ah, ds);
1208 if (status == -EINPROGRESS) {
1209 spin_unlock_bh(&txq->axq_lock);
1210 break;
1211 }
1212 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1213 txq->axq_lastdsWithCTS = NULL;
1214 if (ds == txq->axq_gatingds)
1215 txq->axq_gatingds = NULL;
1216
1217 /*
1218 * Remove ath_buf's of the same transmit unit from txq,
1219 * however leave the last descriptor back as the holding
1220 * descriptor for hw.
1221 */
1222 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1223 INIT_LIST_HEAD(&bf_head);
1224
1225 if (!list_is_singular(&lastbf->list))
1226 list_cut_position(&bf_head,
1227 &txq->axq_q, lastbf->list.prev);
1228
1229 txq->axq_depth--;
1230
 1231 if (bf_isaggr(bf))
1232 txq->axq_aggr_depth--;
1233
1234 txok = (ds->ds_txstat.ts_status == 0);
1235
1236 spin_unlock_bh(&txq->axq_lock);
1237
1238 if (bf_held) {
1239 list_del(&bf_held->list);
1240 spin_lock_bh(&sc->sc_txbuflock);
1241 list_add_tail(&bf_held->list, &sc->sc_txbuf);
1242 spin_unlock_bh(&sc->sc_txbuflock);
1243 }
1244
 1245 if (!bf_isampdu(bf)) {
1246 /*
1247 * This frame is sent out as a single frame.
1248 * Use hardware retry status for this frame.
1249 */
1250 bf->bf_retries = ds->ds_txstat.ts_longretry;
1251 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
 1252 bf->bf_state.bf_type |= BUF_XRETRY;
1253 nbad = 0;
1254 } else {
1255 nbad = ath_tx_num_badfrms(sc, bf, txok);
1256 }
1257 skb = bf->bf_mpdu;
1258 tx_info = IEEE80211_SKB_CB(skb);
1259
1260 /* XXX: HACK! */
1261 tx_info_priv = (struct ath_tx_info_priv *) tx_info->control.vif;
1262 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1263 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1264 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1265 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1266 if (ds->ds_txstat.ts_status == 0)
1267 nacked++;
1268
 1269 if (bf_isdata(bf)) {
1270 if (isrifs)
1271 tmp_ds = bf->bf_rifslast->bf_desc;
1272 else
1273 tmp_ds = ds;
1274 memcpy(&tx_info_priv->tx,
1275 &tmp_ds->ds_txstat,
1276 sizeof(tx_info_priv->tx));
1277 tx_info_priv->n_frames = bf->bf_nframes;
1278 tx_info_priv->n_bad_frames = nbad;
1279 }
1280 }
1281
1282 /*
1283 * Complete this transmit unit
1284 */
 1285 if (bf_isampdu(bf))
1286 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1287 else
1288 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1289
1290 /* Wake up mac80211 queue */
1291
1292 spin_lock_bh(&txq->axq_lock);
1293 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1294 (ATH_TXBUF - 20)) {
1295 int qnum;
1296 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1297 if (qnum != -1) {
1298 ieee80211_wake_queue(sc->hw, qnum);
1299 txq->stopped = 0;
1300 }
1301
1302 }
1303
1304 /*
1305 * schedule any pending packets if aggregation is enabled
1306 */
 1307 if (sc->sc_flags & SC_OP_TXAGGR)
1308 ath_txq_schedule(sc, txq);
1309 spin_unlock_bh(&txq->axq_lock);
1310 }
1311 return nacked;
1312}
1313
1314static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1315{
1316 struct ath_hal *ah = sc->sc_ah;
1317
1318 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1319 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1320 __func__, txq->axq_qnum,
1321 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1322}
1323
1324/* Drain only the data queues */
1325
1326static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1327{
1328 struct ath_hal *ah = sc->sc_ah;
1329 int i;
1330 int npend = 0;
1331
1332 /* XXX return value */
 1333 if (!(sc->sc_flags & SC_OP_INVALID)) {
1334 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1335 if (ATH_TXQ_SETUP(sc, i)) {
1336 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1337
1338 /* The TxDMA may not really be stopped.
1339 * Double check the hal tx pending count */
1340 npend += ath9k_hw_numtxpending(ah,
1341 sc->sc_txq[i].axq_qnum);
1342 }
1343 }
1344 }
1345
1346 if (npend) {
1347 int status;
1348
1349 /* TxDMA not stopped, reset the hal */
1350 DPRINTF(sc, ATH_DBG_XMIT,
1351 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1352
1353 spin_lock_bh(&sc->sc_resetlock);
 1354 if (!ath9k_hw_reset(ah,
1355 sc->sc_ah->ah_curchan,
1356 sc->sc_ht_info.tx_chan_width,
1357 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1358 sc->sc_ht_extprotspacing, true, &status)) {
1359
1360 DPRINTF(sc, ATH_DBG_FATAL,
1361 "%s: unable to reset hardware; hal status %u\n",
1362 __func__,
1363 status);
1364 }
1365 spin_unlock_bh(&sc->sc_resetlock);
1366 }
1367
1368 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1369 if (ATH_TXQ_SETUP(sc, i))
1370 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1371 }
1372}
1373
1374/* Add a sub-frame to block ack window */
1375
1376static void ath_tx_addto_baw(struct ath_softc *sc,
1377 struct ath_atx_tid *tid,
1378 struct ath_buf *bf)
1379{
1380 int index, cindex;
1381
 1382 if (bf_isretried(bf))
1383 return;
1384
1385 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1386 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1387
1388 ASSERT(tid->tx_buf[cindex] == NULL);
1389 tid->tx_buf[cindex] = bf;
1390
1391 if (index >= ((tid->baw_tail - tid->baw_head) &
1392 (ATH_TID_MAX_BUFS - 1))) {
1393 tid->baw_tail = cindex;
1394 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1395 }
1396}
1397
1398/*
1399 * Function to send an A-MPDU
1400 * NB: must be called with txq lock held
1401 */
1402
1403static int ath_tx_send_ampdu(struct ath_softc *sc,
1404 struct ath_txq *txq,
1405 struct ath_atx_tid *tid,
1406 struct list_head *bf_head,
1407 struct ath_tx_control *txctl)
1408{
1409 struct ath_buf *bf;
1410 struct sk_buff *skb;
1411 struct ieee80211_tx_info *tx_info;
1412 struct ath_tx_info_priv *tx_info_priv;
1413
1414 BUG_ON(list_empty(bf_head));
1415
1416 bf = list_first_entry(bf_head, struct ath_buf, list);
 1417 bf->bf_state.bf_type |= BUF_AMPDU;
1418 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1419 bf->bf_tidno = txctl->tidno;
1420
1421 /*
1422 * Do not queue to h/w when any of the following conditions is true:
1423 * - there are pending frames in software queue
1424 * - the TID is currently paused for ADDBA/BAR request
1425 * - seqno is not within block-ack window
1426 * - h/w queue depth exceeds low water mark
1427 */
1428 if (!list_empty(&tid->buf_q) || tid->paused ||
1429 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1430 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1431 /*
1432 * Add this frame to software queue for scheduling later
1433 * for aggregation.
1434 */
1435 list_splice_tail_init(bf_head, &tid->buf_q);
1436 ath_tx_queue_tid(txq, tid);
1437 return 0;
1438 }
1439
1440 skb = (struct sk_buff *)bf->bf_mpdu;
1441 tx_info = IEEE80211_SKB_CB(skb);
1442 /* XXX: HACK! */
1443 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
1444 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1445
1446 /* Add sub-frame to BAW */
1447 ath_tx_addto_baw(sc, tid, bf);
1448
1449 /* Queue to h/w without aggregation */
1450 bf->bf_nframes = 1;
1451 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1452 ath_buf_set_rate(sc, bf);
1453 ath_tx_txqaddbuf(sc, txq, bf_head);
1454 return 0;
1455}
1456
1457/*
1458 * looks up the rate
1459 * returns aggr limit based on lowest of the rates
1460 */
1461
1462static u32 ath_lookup_rate(struct ath_softc *sc,
1463 struct ath_buf *bf,
1464 struct ath_atx_tid *tid)
1465{
1466 const struct ath9k_rate_table *rt = sc->sc_currates;
1467 struct sk_buff *skb;
1468 struct ieee80211_tx_info *tx_info;
1469 struct ath_tx_info_priv *tx_info_priv;
1470 u32 max_4ms_framelen, frame_length;
1471 u16 aggr_limit, legacy = 0, maxampdu;
1472 int i;
1473
1474
1475 skb = (struct sk_buff *)bf->bf_mpdu;
1476 tx_info = IEEE80211_SKB_CB(skb);
1477 tx_info_priv = (struct ath_tx_info_priv *)
 1478 tx_info->control.vif; /* XXX: HACK! */
1479 memcpy(bf->bf_rcs,
1480 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1481
1482 /*
1483 * Find the lowest frame length among the rate series that will have a
1484 * 4ms transmit duration.
1485 * TODO - TXOP limit needs to be considered.
1486 */
1487 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1488
1489 for (i = 0; i < 4; i++) {
1490 if (bf->bf_rcs[i].tries) {
1491 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1492
1493 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1494 legacy = 1;
1495 break;
1496 }
1497
1498 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1499 }
1500 }
1501
1502 /*
1503 * limit aggregate size by the minimum rate if rate selected is
1504 * not a probe rate, if rate selected is a probe rate then
1505 * avoid aggregation of this packet.
1506 */
1507 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1508 return 0;
1509
1510 aggr_limit = min(max_4ms_framelen,
1511 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1512
1513 /*
 1514 * h/w can accept aggregates up to 16 bit lengths (65535).
 1515 * The IE, however, can hold up to 65536, which shows up here
 1516 * as zero. Ignore 65536 since we are constrained by hw.
1517 */
 1518 maxampdu = tid->an->maxampdu;
1519 if (maxampdu)
1520 aggr_limit = min(aggr_limit, maxampdu);
1521
1522 return aggr_limit;
1523}
1524
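/*
 * Illustrative example (added comment, not from the original source):
 * if the slowest rate series in use can fit, say, 24000 bytes into a
 * 4 ms burst while the faster series allow more, max_4ms_framelen
 * becomes 24000. The aggregate is then capped at
 * min(24000, ATH_AMPDU_LIMIT_DEFAULT) and, if the peer's HT
 * capabilities advertise a smaller maximum A-MPDU
 * (tid->an->maxampdu), clamped once more to that value. A legacy rate
 * or a rate-control probe in the series disables aggregation entirely.
 */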
1525/*
1526 * returns the number of delimiters to be added to
1527 * meet the minimum required mpdudensity.
 1528 * caller should make sure that the rate is an HT rate.
1529 */
1530
1531static int ath_compute_num_delims(struct ath_softc *sc,
 1532 struct ath_atx_tid *tid,
1533 struct ath_buf *bf,
1534 u16 frmlen)
1535{
1536 const struct ath9k_rate_table *rt = sc->sc_currates;
1537 u32 nsymbits, nsymbols, mpdudensity;
1538 u16 minlen;
1539 u8 rc, flags, rix;
1540 int width, half_gi, ndelim, mindelim;
1541
1542 /* Select standard number of delimiters based on frame length alone */
1543 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1544
1545 /*
1546 * If encryption enabled, hardware requires some more padding between
1547 * subframes.
1548 * TODO - this could be improved to be dependent on the rate.
1549 * The hardware can keep up at lower rates, but not higher rates
1550 */
1551 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1552 ndelim += ATH_AGGR_ENCRYPTDELIM;
1553
1554 /*
 1555 * Convert desired mpdu density from microseconds to bytes based
 1556 * on highest rate in rate series (i.e. first rate) to determine
 1557 * required minimum length for subframe. Take into account
 1558 * whether the high rate is 20 or 40 MHz and half or full GI.
1559 */
 1560 mpdudensity = tid->an->mpdudensity;
1561
1562 /*
1563 * If there is no mpdu density restriction, no further calculation
1564 * is needed.
1565 */
1566 if (mpdudensity == 0)
1567 return ndelim;
1568
1569 rix = bf->bf_rcs[0].rix;
1570 flags = bf->bf_rcs[0].flags;
1571 rc = rt->info[rix].rateCode;
1572 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1573 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1574
1575 if (half_gi)
1576 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1577 else
1578 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1579
1580 if (nsymbols == 0)
1581 nsymbols = 1;
1582
1583 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1584 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1585
1586 /* Is frame shorter than required minimum length? */
1587 if (frmlen < minlen) {
1588 /* Get the minimum number of delimiters required. */
1589 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1590 ndelim = max(mindelim, ndelim);
1591 }
1592
1593 return ndelim;
1594}
1595
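/*
 * Illustrative example (added comment, not from the original source),
 * assuming a 4-byte MPDU delimiter (ATH_AGGR_DELIM_SZ == 4): with an
 * MPDU density of 8 us and MCS 7 at 20 MHz, long GI,
 * nsymbols = 8 >> 2 = 2 and nsymbits = 260, so
 * minlen = 2 * 260 / 8 = 65 bytes. A 40-byte subframe is too short,
 * so mindelim = (65 - 40) / 4 = 6 extra delimiters are inserted (or
 * more, if the length-based default or the encryption padding already
 * demands it).
 */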
1596/*
1597 * For aggregation from software buffer queue.
1598 * NB: must be called with txq lock held
1599 */
1600
1601static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1602 struct ath_atx_tid *tid,
1603 struct list_head *bf_q,
1604 struct ath_buf **bf_last,
1605 struct aggr_rifs_param *param,
1606 int *prev_frames)
1607{
1608#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
1609 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1610 struct list_head bf_head;
1611 int rl = 0, nframes = 0, ndelim;
1612 u16 aggr_limit = 0, al = 0, bpad = 0,
1613 al_delta, h_baw = tid->baw_size / 2;
1614 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1615 int prev_al = 0, is_ds_rate = 0;
1616 INIT_LIST_HEAD(&bf_head);
1617
1618 BUG_ON(list_empty(&tid->buf_q));
1619
1620 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1621
1622 do {
1623 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1624
1625 /*
1626 * do not step over block-ack window
1627 */
1628 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1629 status = ATH_AGGR_BAW_CLOSED;
1630 break;
1631 }
1632
1633 if (!rl) {
 1634 aggr_limit = ath_lookup_rate(sc, bf, tid);
1635 rl = 1;
1636 /*
1637 * Is rate dual stream
1638 */
1639 is_ds_rate =
1640 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1641 }
1642
1643 /*
1644 * do not exceed aggregation limit
1645 */
1646 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1647
1648 if (nframes && (aggr_limit <
1649 (al + bpad + al_delta + prev_al))) {
1650 status = ATH_AGGR_LIMITED;
1651 break;
1652 }
1653
1654 /*
1655 * do not exceed subframe limit
1656 */
1657 if ((nframes + *prev_frames) >=
1658 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1659 status = ATH_AGGR_LIMITED;
1660 break;
1661 }
1662
1663 /*
1664 * add padding for previous frame to aggregation length
1665 */
1666 al += bpad + al_delta;
1667
1668 /*
1669 * Get the delimiters needed to meet the MPDU
1670 * density for this node.
1671 */
 1672 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
1673
1674 bpad = PADBYTES(al_delta) + (ndelim << 2);
1675
1676 bf->bf_next = NULL;
1677 bf->bf_lastfrm->bf_desc->ds_link = 0;
1678
1679 /*
1680 * this packet is part of an aggregate
1681 * - remove all descriptors belonging to this frame from
1682 * software queue
1683 * - add it to block ack window
1684 * - set up descriptors for aggregation
1685 */
1686 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1687 ath_tx_addto_baw(sc, tid, bf);
1688
1689 list_for_each_entry(tbf, &bf_head, list) {
1690 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1691 tbf->bf_desc, ndelim);
1692 }
1693
1694 /*
1695 * link buffers of this frame to the aggregate
1696 */
1697 list_splice_tail_init(&bf_head, bf_q);
1698 nframes++;
1699
1700 if (bf_prev) {
1701 bf_prev->bf_next = bf;
1702 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1703 }
1704 bf_prev = bf;
1705
1706#ifdef AGGR_NOSHORT
1707 /*
1708 * terminate aggregation on a small packet boundary
1709 */
1710 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1711 status = ATH_AGGR_SHORTPKT;
1712 break;
1713 }
1714#endif
1715 } while (!list_empty(&tid->buf_q));
1716
1717 bf_first->bf_al = al;
1718 bf_first->bf_nframes = nframes;
1719 *bf_last = bf_prev;
1720 return status;
1721#undef PADBYTES
1722}
1723
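/*
 * Illustrative example (added comment, not from the original source),
 * again assuming a 4-byte delimiter: a 1534-byte subframe contributes
 * al_delta = 4 + 1534 = 1538 bytes to the aggregate length. The
 * padding reserved before the next subframe is then
 * bpad = PADBYTES(1538) + (ndelim << 2) = 2 + 4 = 6 bytes for a
 * single delimiter; that padding is only added to the running total
 * al once a further subframe actually joins the aggregate.
 */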
1724/*
1725 * process pending frames possibly doing a-mpdu aggregation
1726 * NB: must be called with txq lock held
1727 */
1728
1729static void ath_tx_sched_aggr(struct ath_softc *sc,
1730 struct ath_txq *txq, struct ath_atx_tid *tid)
1731{
1732 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1733 enum ATH_AGGR_STATUS status;
1734 struct list_head bf_q;
1735 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1736 int prev_frames = 0;
1737
1738 do {
1739 if (list_empty(&tid->buf_q))
1740 return;
1741
1742 INIT_LIST_HEAD(&bf_q);
1743
1744 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
1745 &prev_frames);
1746
1747 /*
1748 * no frames picked up to be aggregated; block-ack
1749 * window is not open
1750 */
1751 if (list_empty(&bf_q))
1752 break;
1753
1754 bf = list_first_entry(&bf_q, struct ath_buf, list);
1755 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1756 bf->bf_lastbf = bf_last;
1757
1758 /*
1759 * if only one frame, send as non-aggregate
1760 */
1761 if (bf->bf_nframes == 1) {
1762 ASSERT(bf->bf_lastfrm == bf_last);
1763
 1764 bf->bf_state.bf_type &= ~BUF_AGGR;
1765 /*
1766 * clear aggr bits for every descriptor
1767 * XXX TODO: is there a way to optimize it?
1768 */
1769 list_for_each_entry(tbf, &bf_q, list) {
1770 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1771 }
1772
1773 ath_buf_set_rate(sc, bf);
1774 ath_tx_txqaddbuf(sc, txq, &bf_q);
1775 continue;
1776 }
1777
1778 /*
1779 * setup first desc with rate and aggr info
1780 */
 1781 bf->bf_state.bf_type |= BUF_AGGR;
1782 ath_buf_set_rate(sc, bf);
1783 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1784
1785 /*
1786 * anchor last frame of aggregate correctly
1787 */
1788 ASSERT(bf_lastaggr);
1789 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1790 tbf = bf_lastaggr;
1791 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1792
1793 /* XXX: We don't enter into this loop, consider removing this */
1794 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1795 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1796 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1797 }
1798
1799 txq->axq_aggr_depth++;
1800
1801 /*
1802 * Normal aggregate, queue to hardware
1803 */
1804 ath_tx_txqaddbuf(sc, txq, &bf_q);
1805
1806 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1807 status != ATH_AGGR_BAW_CLOSED);
1808}
1809
1810/* Called with txq lock held */
1811
1812static void ath_tid_drain(struct ath_softc *sc,
1813 struct ath_txq *txq,
1814 struct ath_atx_tid *tid,
1815 bool bh_flag)
1816{
1817 struct ath_buf *bf;
1818 struct list_head bf_head;
1819 INIT_LIST_HEAD(&bf_head);
1820
1821 for (;;) {
1822 if (list_empty(&tid->buf_q))
1823 break;
1824 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1825
1826 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1827
1828 /* update baw for software retried frame */
 1829 if (bf_isretried(bf))
1830 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1831
1832 /*
1833 * do not indicate packets while holding txq spinlock.
1834 * unlock is intentional here
1835 */
1836 if (likely(bh_flag))
1837 spin_unlock_bh(&txq->axq_lock);
1838 else
1839 spin_unlock(&txq->axq_lock);
1840
1841 /* complete this sub-frame */
1842 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1843
1844 if (likely(bh_flag))
1845 spin_lock_bh(&txq->axq_lock);
1846 else
1847 spin_lock(&txq->axq_lock);
1848 }
1849
1850 /*
1851 * TODO: For frame(s) that are in the retry state, we will reuse the
1852 * sequence number(s) without setting the retry bit. The
1853 * alternative is to give up on these and BAR the receiver's window
1854 * forward.
1855 */
1856 tid->seq_next = tid->seq_start;
1857 tid->baw_tail = tid->baw_head;
1858}
1859
1860/*
1861 * Drain all pending buffers
1862 * NB: must be called with txq lock held
1863 */
1864
1865static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1866 struct ath_txq *txq,
1867 bool bh_flag)
1868{
1869 struct ath_atx_ac *ac, *ac_tmp;
1870 struct ath_atx_tid *tid, *tid_tmp;
1871
1872 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1873 list_del(&ac->list);
1874 ac->sched = false;
1875 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1876 list_del(&tid->list);
1877 tid->sched = false;
1878 ath_tid_drain(sc, txq, tid, bh_flag);
1879 }
1880 }
1881}
1882
1883static int ath_tx_start_dma(struct ath_softc *sc,
1884 struct sk_buff *skb,
1885 struct scatterlist *sg,
1886 u32 n_sg,
1887 struct ath_tx_control *txctl)
1888{
1889 struct ath_node *an = txctl->an;
1890 struct ath_buf *bf = NULL;
1891 struct list_head bf_head;
1892 struct ath_desc *ds;
1893 struct ath_hal *ah = sc->sc_ah;
 1894 struct ath_txq *txq;
1895 struct ath_tx_info_priv *tx_info_priv;
1896 struct ath_rc_series *rcs;
1897 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1898 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1899 __le16 fc = hdr->frame_control;
1900
1901 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
1902 txq = sc->sc_cabq;
1903 else
1904 txq = &sc->sc_txq[txctl->qnum];
1905
1906 /* For each sglist entry, allocate an ath_buf for DMA */
1907 INIT_LIST_HEAD(&bf_head);
1908 spin_lock_bh(&sc->sc_txbuflock);
1909 if (unlikely(list_empty(&sc->sc_txbuf))) {
1910 spin_unlock_bh(&sc->sc_txbuflock);
1911 return -ENOMEM;
1912 }
1913
1914 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
1915 list_del(&bf->list);
1916 spin_unlock_bh(&sc->sc_txbuflock);
1917
1918 list_add_tail(&bf->list, &bf_head);
1919
1920 /* set up this buffer */
1921 ATH_TXBUF_RESET(bf);
1922 bf->bf_frmlen = txctl->frmlen;
1923
1924 ieee80211_is_data(fc) ?
1925 (bf->bf_state.bf_type |= BUF_DATA) :
1926 (bf->bf_state.bf_type &= ~BUF_DATA);
1927 ieee80211_is_back_req(fc) ?
1928 (bf->bf_state.bf_type |= BUF_BAR) :
1929 (bf->bf_state.bf_type &= ~BUF_BAR);
1930 ieee80211_is_pspoll(fc) ?
1931 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1932 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
 1933 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1934 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1935 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1936
 1937 bf->bf_flags = txctl->flags;
 1938 bf->bf_keytype = txctl->keytype;
1939 /* XXX: HACK! */
1940 tx_info_priv = (struct ath_tx_info_priv *)tx_info->control.vif;
1941 rcs = tx_info_priv->rcs;
1942 bf->bf_rcs[0] = rcs[0];
1943 bf->bf_rcs[1] = rcs[1];
1944 bf->bf_rcs[2] = rcs[2];
1945 bf->bf_rcs[3] = rcs[3];
1946 bf->bf_node = an;
1947 bf->bf_mpdu = skb;
1948 bf->bf_buf_addr = sg_dma_address(sg);
1949
1950 /* setup descriptor */
1951 ds = bf->bf_desc;
1952 ds->ds_link = 0;
1953 ds->ds_data = bf->bf_buf_addr;
1954
1955 /*
1956 * Save the DMA context in the first ath_buf
1957 */
ff9b662d 1958 bf->bf_dmacontext = txctl->dmacontext;
f078f209
LR
1959
1960 /*
1961 * Formulate first tx descriptor with tx controls.
1962 */
1963 ath9k_hw_set11n_txdesc(ah,
1964 ds,
1965 bf->bf_frmlen, /* frame length */
1966 txctl->atype, /* Atheros packet type */
1967 min(txctl->txpower, (u16)60), /* txpower */
1968 txctl->keyix, /* key cache index */
1969 txctl->keytype, /* key type */
1970 txctl->flags); /* flags */
1971 ath9k_hw_filltxdesc(ah,
1972 ds,
1973 sg_dma_len(sg), /* segment length */
1974 true, /* first segment */
 1975 (n_sg == 1), /* last segment */
1976 ds); /* first descriptor */
1977
1978 bf->bf_lastfrm = bf;
cd3d39a6
S
1979 (txctl->ht) ?
1980 (bf->bf_state.bf_type |= BUF_HT) :
1981 (bf->bf_state.bf_type &= ~BUF_HT);
f078f209
LR
1982
1983 spin_lock_bh(&txq->axq_lock);
1984
672840ac 1985 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
f078f209
LR
1986 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
1987 if (ath_aggr_query(sc, an, txctl->tidno)) {
1988 /*
1989 * Try aggregation if it's a unicast data frame
1990 * and the destination is HT capable.
1991 */
1992 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
1993 } else {
1994 /*
 1995 * Send this frame as a regular frame when the ADDBA
 1996 * exchange is neither complete nor pending.
1997 */
1998 ath_tx_send_normal(sc, txq, tid, &bf_head);
1999 }
2000 } else {
2001 bf->bf_lastbf = bf;
2002 bf->bf_nframes = 1;
2003 ath_buf_set_rate(sc, bf);
2004
2005 if (ieee80211_is_back_req(fc)) {
2006 /* This is required for resuming tid
2007 * during BAR completion */
2008 bf->bf_tidno = txctl->tidno;
2009 }
2010
e022edbd 2011 ath_tx_txqaddbuf(sc, txq, &bf_head);
f078f209
LR
2012 }
2013 spin_unlock_bh(&txq->axq_lock);
2014 return 0;
2015}
2016
2017static void xmit_map_sg(struct ath_softc *sc,
2018 struct sk_buff *skb,
f078f209
LR
2019 struct ath_tx_control *txctl)
2020{
2021 struct ath_xmit_status tx_status;
2022 struct ath_atx_tid *tid;
2023 struct scatterlist sg;
2024
ff9b662d
S
2025 txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
2026 skb->len, PCI_DMA_TODEVICE);
f078f209
LR
2027
2028 /* setup S/G list */
2029 memset(&sg, 0, sizeof(struct scatterlist));
ff9b662d 2030 sg_dma_address(&sg) = txctl->dmacontext;
f078f209
LR
2031 sg_dma_len(&sg) = skb->len;
2032
2033 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2034 /*
 2035 * We have to drop the frame here.
2036 */
ff9b662d
S
2037 pci_unmap_single(sc->pdev, txctl->dmacontext,
2038 skb->len, PCI_DMA_TODEVICE);
f078f209
LR
2039
2040 tx_status.retries = 0;
2041 tx_status.flags = ATH_TX_ERROR;
2042
672840ac 2043 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
f078f209
LR
2044 /* Reclaim the seqno. */
2045 tid = ATH_AN_2_TID((struct ath_node *)
2046 txctl->an, txctl->tidno);
2047 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2048 }
2049 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2050 }
2051}
2052
2053/* Initialize TX queue and h/w */
2054
2055int ath_tx_init(struct ath_softc *sc, int nbufs)
2056{
2057 int error = 0;
2058
2059 do {
2060 spin_lock_init(&sc->sc_txbuflock);
2061
2062 /* Setup tx descriptors */
2063 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
556bb8f1 2064 "tx", nbufs, 1);
f078f209
LR
2065 if (error != 0) {
2066 DPRINTF(sc, ATH_DBG_FATAL,
2067 "%s: failed to allocate tx descriptors: %d\n",
2068 __func__, error);
2069 break;
2070 }
2071
2072 /* XXX allocate beacon state together with vap */
2073 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2074 "beacon", ATH_BCBUF, 1);
2075 if (error != 0) {
2076 DPRINTF(sc, ATH_DBG_FATAL,
2077 "%s: failed to allocate "
2078 "beacon descripotrs: %d\n",
2079 __func__, error);
2080 break;
2081 }
2082
2083 } while (0);
2084
2085 if (error != 0)
2086 ath_tx_cleanup(sc);
2087
2088 return error;
2089}
2090
2091/* Reclaim all tx queue resources */
2092
2093int ath_tx_cleanup(struct ath_softc *sc)
2094{
2095 /* cleanup beacon descriptors */
2096 if (sc->sc_bdma.dd_desc_len != 0)
2097 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
2098
2099 /* cleanup tx descriptors */
2100 if (sc->sc_txdma.dd_desc_len != 0)
2101 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
2102
2103 return 0;
2104}
2105
2106/* Setup a h/w transmit queue */
2107
2108struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2109{
2110 struct ath_hal *ah = sc->sc_ah;
ea9880fb 2111 struct ath9k_tx_queue_info qi;
f078f209
LR
2112 int qnum;
2113
0345f37b 2114 memset(&qi, 0, sizeof(qi));
f078f209
LR
2115 qi.tqi_subtype = subtype;
2116 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
2117 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
2118 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
ea9880fb 2119 qi.tqi_physCompBuf = 0;
f078f209
LR
2120
2121 /*
2122 * Enable interrupts only for EOL and DESC conditions.
2123 * We mark tx descriptors to receive a DESC interrupt
 2124 * when a tx queue gets deep; otherwise we wait for the
 2125 * EOL interrupt to reap descriptors. Note that this is done to
2126 * reduce interrupt load and this only defers reaping
2127 * descriptors, never transmitting frames. Aside from
2128 * reducing interrupts this also permits more concurrency.
2129 * The only potential downside is if the tx queue backs
 2130 * up, in which case the top half of the kernel may back up
2131 * due to a lack of tx descriptors.
2132 *
2133 * The UAPSD queue is an exception, since we take a desc-
2134 * based intr on the EOSP frames.
2135 */
2136 if (qtype == ATH9K_TX_QUEUE_UAPSD)
2137 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
2138 else
2139 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
2140 TXQ_FLAG_TXDESCINT_ENABLE;
2141 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
2142 if (qnum == -1) {
2143 /*
2144 * NB: don't print a message, this happens
2145 * normally on parts with too few tx queues
2146 */
2147 return NULL;
2148 }
2149 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
2150 DPRINTF(sc, ATH_DBG_FATAL,
2151 "%s: hal qnum %u out of range, max %u!\n",
2152 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
2153 ath9k_hw_releasetxqueue(ah, qnum);
2154 return NULL;
2155 }
2156 if (!ATH_TXQ_SETUP(sc, qnum)) {
2157 struct ath_txq *txq = &sc->sc_txq[qnum];
2158
2159 txq->axq_qnum = qnum;
2160 txq->axq_link = NULL;
2161 INIT_LIST_HEAD(&txq->axq_q);
2162 INIT_LIST_HEAD(&txq->axq_acq);
2163 spin_lock_init(&txq->axq_lock);
2164 txq->axq_depth = 0;
2165 txq->axq_aggr_depth = 0;
2166 txq->axq_totalqueued = 0;
2167 txq->axq_intrcnt = 0;
2168 txq->axq_linkbuf = NULL;
2169 sc->sc_txqsetup |= 1<<qnum;
2170 }
2171 return &sc->sc_txq[qnum];
2172}
2173
2174/* Reclaim resources for a setup queue */
2175
2176void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2177{
2178 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2179 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2180}
2181
2182/*
2183 * Setup a hardware data transmit queue for the specified
 2184 * access category (AC). The hal may not support all requested
 2185 * queues, in which case it will return a reference to a
 2186 * previously setup queue. We record the mapping from ACs
2187 * to h/w queues for use by ath_tx_start and also track
2188 * the set of h/w queues being used to optimize work in the
2189 * transmit interrupt handler and related routines.
2190 */
2191
2192int ath_tx_setup(struct ath_softc *sc, int haltype)
2193{
2194 struct ath_txq *txq;
2195
2196 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2197 DPRINTF(sc, ATH_DBG_FATAL,
2198 "%s: HAL AC %u out of range, max %zu!\n",
2199 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2200 return 0;
2201 }
2202 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2203 if (txq != NULL) {
2204 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2205 return 1;
2206 } else
2207 return 0;
2208}
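
/*
 * For illustration only (a sketch, not lifted from the attach code): the
 * driver is expected to call this once per WME access category while
 * attaching, roughly along the lines of
 *
 *	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_BK) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_VI) ||
 *	    !ath_tx_setup(sc, ATH9K_WME_AC_VO))
 *		fail the attach and release the queues already set up;
 *
 * on success the AC-to-queue mapping is then consumed via
 * ath_tx_get_qnum() below.
 */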
2209
2210int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2211{
2212 int qnum;
2213
2214 switch (qtype) {
2215 case ATH9K_TX_QUEUE_DATA:
2216 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2217 DPRINTF(sc, ATH_DBG_FATAL,
2218 "%s: HAL AC %u out of range, max %zu!\n",
2219 __func__,
2220 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2221 return -1;
2222 }
2223 qnum = sc->sc_haltype2q[haltype];
2224 break;
2225 case ATH9K_TX_QUEUE_BEACON:
2226 qnum = sc->sc_bhalq;
2227 break;
2228 case ATH9K_TX_QUEUE_CAB:
2229 qnum = sc->sc_cabq->axq_qnum;
2230 break;
2231 default:
2232 qnum = -1;
2233 }
2234 return qnum;
2235}
2236
2237/* Update parameters for a transmit queue */
2238
ea9880fb
S
2239int ath_txq_update(struct ath_softc *sc, int qnum,
2240 struct ath9k_tx_queue_info *qinfo)
f078f209
LR
2241{
2242 struct ath_hal *ah = sc->sc_ah;
2243 int error = 0;
ea9880fb 2244 struct ath9k_tx_queue_info qi;
f078f209
LR
2245
2246 if (qnum == sc->sc_bhalq) {
2247 /*
2248 * XXX: for beacon queue, we just save the parameter.
2249 * It will be picked up by ath_beaconq_config when
2250 * it's necessary.
2251 */
ea9880fb 2252 sc->sc_beacon_qi = *qinfo;
f078f209
LR
2253 return 0;
2254 }
2255
2256 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2257
ea9880fb
S
2258 ath9k_hw_get_txq_props(ah, qnum, &qi);
2259 qi.tqi_aifs = qinfo->tqi_aifs;
2260 qi.tqi_cwmin = qinfo->tqi_cwmin;
2261 qi.tqi_cwmax = qinfo->tqi_cwmax;
2262 qi.tqi_burstTime = qinfo->tqi_burstTime;
2263 qi.tqi_readyTime = qinfo->tqi_readyTime;
f078f209 2264
ea9880fb 2265 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
f078f209
LR
2266 DPRINTF(sc, ATH_DBG_FATAL,
2267 "%s: unable to update hardware queue %u!\n",
2268 __func__, qnum);
2269 error = -EIO;
2270 } else {
2271 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
2272 }
2273
2274 return error;
2275}
2276
2277int ath_cabq_update(struct ath_softc *sc)
2278{
ea9880fb 2279 struct ath9k_tx_queue_info qi;
f078f209
LR
2280 int qnum = sc->sc_cabq->axq_qnum;
2281 struct ath_beacon_config conf;
2282
ea9880fb 2283 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
f078f209
LR
2284 /*
2285 * Ensure the readytime % is within the bounds.
2286 */
2287 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2288 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2289 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2290 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2291
2292 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2293 qi.tqi_readyTime =
2294 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
2295 ath_txq_update(sc, qnum, &qi);
2296
2297 return 0;
2298}
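
/*
 * Worked example for the readyTime computation above: if cabqReadytime is
 * clamped to 10 and the beacon interval is 100, then
 * tqi_readyTime = (100 * 10) / 100 = 10, i.e. the CAB queue gets to run
 * for 10% of each beacon interval after the beacon goes out.
 */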
2299
2300int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2301{
2302 struct ath_tx_control txctl;
2303 int error = 0;
2304
e022edbd 2305 memset(&txctl, 0, sizeof(struct ath_tx_control));
f078f209
LR
2306 error = ath_tx_prepare(sc, skb, &txctl);
2307 if (error == 0)
2308 /*
2309 * Start DMA mapping.
2310 * ath_tx_start_dma() will be called either synchronously
 2311 * or asynchronously once DMA is complete.
2312 */
ff9b662d 2313 xmit_map_sg(sc, skb, &txctl);
f078f209
LR
2314 else
2315 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2316
2317 /* failed packets will be dropped by the caller */
2318 return error;
2319}
2320
2321/* Deferred processing of transmit interrupt */
2322
2323void ath_tx_tasklet(struct ath_softc *sc)
2324{
1fe1132b 2325 int i;
f078f209
LR
2326 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2327
2328 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2329
2330 /*
2331 * Process each active queue.
2332 */
2333 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2334 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
1fe1132b 2335 ath_tx_processq(sc, &sc->sc_txq[i]);
f078f209 2336 }
f078f209
LR
2337}
2338
2339void ath_tx_draintxq(struct ath_softc *sc,
2340 struct ath_txq *txq, bool retry_tx)
2341{
2342 struct ath_buf *bf, *lastbf;
2343 struct list_head bf_head;
2344
2345 INIT_LIST_HEAD(&bf_head);
2346
2347 /*
2348 * NB: this assumes output has been stopped and
2349 * we do not need to block ath_tx_tasklet
2350 */
2351 for (;;) {
2352 spin_lock_bh(&txq->axq_lock);
2353
2354 if (list_empty(&txq->axq_q)) {
2355 txq->axq_link = NULL;
2356 txq->axq_linkbuf = NULL;
2357 spin_unlock_bh(&txq->axq_lock);
2358 break;
2359 }
2360
2361 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2362
2363 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2364 list_del(&bf->list);
2365 spin_unlock_bh(&txq->axq_lock);
2366
2367 spin_lock_bh(&sc->sc_txbuflock);
2368 list_add_tail(&bf->list, &sc->sc_txbuf);
2369 spin_unlock_bh(&sc->sc_txbuflock);
2370 continue;
2371 }
2372
2373 lastbf = bf->bf_lastbf;
2374 if (!retry_tx)
2375 lastbf->bf_desc->ds_txstat.ts_flags =
2376 ATH9K_TX_SW_ABORTED;
2377
2378 /* remove ath_buf's of the same mpdu from txq */
2379 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2380 txq->axq_depth--;
2381
2382 spin_unlock_bh(&txq->axq_lock);
2383
cd3d39a6 2384 if (bf_isampdu(bf))
f078f209
LR
2385 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2386 else
2387 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2388 }
2389
2390 /* flush any pending frames if aggregation is enabled */
672840ac 2391 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2392 if (!retry_tx) {
2393 spin_lock_bh(&txq->axq_lock);
2394 ath_txq_drain_pending_buffers(sc, txq,
2395 ATH9K_BH_STATUS_CHANGE);
2396 spin_unlock_bh(&txq->axq_lock);
2397 }
2398 }
2399}
2400
2401/* Drain the transmit queues and reclaim resources */
2402
2403void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2404{
2405 /* stop beacon queue. The beacon will be freed when
2406 * we go to INIT state */
672840ac 2407 if (!(sc->sc_flags & SC_OP_INVALID)) {
f078f209
LR
2408 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2409 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2410 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2411 }
2412
2413 ath_drain_txdataq(sc, retry_tx);
2414}
2415
2416u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2417{
2418 return sc->sc_txq[qnum].axq_depth;
2419}
2420
2421u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2422{
2423 return sc->sc_txq[qnum].axq_aggr_depth;
2424}
2425
2426/* Check if an ADDBA is required. A valid node must be passed. */
2427enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2428 struct ath_node *an,
2429 u8 tidno)
2430{
2431 struct ath_atx_tid *txtid;
f078f209 2432
672840ac 2433 if (!(sc->sc_flags & SC_OP_TXAGGR))
f078f209
LR
2434 return AGGR_NOT_REQUIRED;
2435
2436 /* ADDBA exchange must be completed before sending aggregates */
2437 txtid = ATH_AN_2_TID(an, tidno);
2438
2439 if (txtid->addba_exchangecomplete)
2440 return AGGR_EXCHANGE_DONE;
2441
2442 if (txtid->cleanup_inprogress)
2443 return AGGR_CLEANUP_PROGRESS;
2444
2445 if (txtid->addba_exchangeinprogress)
2446 return AGGR_EXCHANGE_PROGRESS;
2447
2448 if (!txtid->addba_exchangecomplete) {
2449 if (!txtid->addba_exchangeinprogress &&
2450 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2451 txtid->addba_exchangeattempts++;
2452 return AGGR_REQUIRED;
2453 }
2454 }
2455
2456 return AGGR_NOT_REQUIRED;
2457}
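
/*
 * For illustration, a caller would typically act on the result along these
 * lines (a sketch only, not lifted from the actual caller):
 *
 *	switch (ath_tx_aggr_check(sc, an, tidno)) {
 *	case AGGR_REQUIRED:
 *		start an ADDBA negotiation with the peer for this TID;
 *		break;
 *	case AGGR_EXCHANGE_DONE:
 *		the frame may be queued for A-MPDU aggregation;
 *		break;
 *	default:
 *		send as a regular, non-aggregated frame;
 *	}
 */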
2458
2459/* Start TX aggregation */
2460
2461int ath_tx_aggr_start(struct ath_softc *sc,
2462 const u8 *addr,
2463 u16 tid,
2464 u16 *ssn)
2465{
2466 struct ath_atx_tid *txtid;
2467 struct ath_node *an;
2468
2469 spin_lock_bh(&sc->node_lock);
2470 an = ath_node_find(sc, (u8 *) addr);
2471 spin_unlock_bh(&sc->node_lock);
2472
2473 if (!an) {
2474 DPRINTF(sc, ATH_DBG_AGGR,
2475 "%s: Node not found to initialize "
2476 "TX aggregation\n", __func__);
2477 return -1;
2478 }
2479
672840ac 2480 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2481 txtid = ATH_AN_2_TID(an, tid);
2482 txtid->addba_exchangeinprogress = 1;
2483 ath_tx_pause_tid(sc, txtid);
2484 }
2485
2486 return 0;
2487}
2488
2489/* Stop tx aggregation */
2490
2491int ath_tx_aggr_stop(struct ath_softc *sc,
2492 const u8 *addr,
2493 u16 tid)
2494{
2495 struct ath_node *an;
2496
2497 spin_lock_bh(&sc->node_lock);
2498 an = ath_node_find(sc, (u8 *) addr);
2499 spin_unlock_bh(&sc->node_lock);
2500
2501 if (!an) {
2502 DPRINTF(sc, ATH_DBG_AGGR,
2503 "%s: TX aggr stop for non-existent node\n", __func__);
2504 return -1;
2505 }
2506
2507 ath_tx_aggr_teardown(sc, an, tid);
2508 return 0;
2509}
2510
2511/*
2512 * Performs transmit side cleanup when TID changes from aggregated to
2513 * unaggregated.
2514 * - Pause the TID and mark cleanup in progress
2515 * - Discard all retry frames from the s/w queue.
2516 */
2517
2518void ath_tx_aggr_teardown(struct ath_softc *sc,
2519 struct ath_node *an, u8 tid)
2520{
2521 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2522 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2523 struct ath_buf *bf;
2524 struct list_head bf_head;
2525 INIT_LIST_HEAD(&bf_head);
2526
2527 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2528
2529 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2530 return;
2531
2532 if (!txtid->addba_exchangecomplete) {
2533 txtid->addba_exchangeattempts = 0;
2534 return;
2535 }
2536
2537 /* TID must be paused first */
2538 ath_tx_pause_tid(sc, txtid);
2539
2540 /* drop all software retried frames and mark this TID */
2541 spin_lock_bh(&txq->axq_lock);
2542 while (!list_empty(&txtid->buf_q)) {
2543 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
cd3d39a6 2544 if (!bf_isretried(bf)) {
f078f209
LR
2545 /*
 2546 * NB: this relies on the assumption that a
 2547 * software-retried frame will always stay
 2548 * at the head of the software queue.
2549 */
2550 break;
2551 }
2552 list_cut_position(&bf_head,
2553 &txtid->buf_q, &bf->bf_lastfrm->list);
2554 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2555
2556 /* complete this sub-frame */
2557 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2558 }
2559
2560 if (txtid->baw_head != txtid->baw_tail) {
2561 spin_unlock_bh(&txq->axq_lock);
2562 txtid->cleanup_inprogress = true;
2563 } else {
2564 txtid->addba_exchangecomplete = 0;
2565 txtid->addba_exchangeattempts = 0;
2566 spin_unlock_bh(&txq->axq_lock);
2567 ath_tx_flush_tid(sc, txtid);
2568 }
2569}
2570
2571/*
2572 * Tx scheduling logic
2573 * NB: must be called with txq lock held
2574 */
2575
2576void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2577{
2578 struct ath_atx_ac *ac;
2579 struct ath_atx_tid *tid;
2580
2581 /* nothing to schedule */
2582 if (list_empty(&txq->axq_acq))
2583 return;
2584 /*
2585 * get the first node/ac pair on the queue
2586 */
2587 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2588 list_del(&ac->list);
2589 ac->sched = false;
2590
2591 /*
2592 * process a single tid per destination
2593 */
2594 do {
2595 /* nothing to schedule */
2596 if (list_empty(&ac->tid_q))
2597 return;
2598
2599 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2600 list_del(&tid->list);
2601 tid->sched = false;
2602
2603 if (tid->paused) /* check next tid to keep h/w busy */
2604 continue;
2605
2606 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
2607 ((txq->axq_depth % 2) == 0)) {
2608 ath_tx_sched_aggr(sc, txq, tid);
2609 }
2610
2611 /*
2612 * add tid to round-robin queue if more frames
2613 * are pending for the tid
2614 */
2615 if (!list_empty(&tid->buf_q))
2616 ath_tx_queue_tid(txq, tid);
2617
2618 /* only schedule one TID at a time */
2619 break;
2620 } while (!list_empty(&ac->tid_q));
2621
2622 /*
2623 * schedule AC if more TIDs need processing
2624 */
2625 if (!list_empty(&ac->tid_q)) {
2626 /*
2627 * add dest ac to txq if not already added
2628 */
2629 if (!ac->sched) {
2630 ac->sched = true;
2631 list_add_tail(&ac->list, &txq->axq_acq);
2632 }
2633 }
2634}
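
/*
 * In other words, each call services exactly one (AC, TID) pair: the AC at
 * the head of axq_acq is dequeued, one of its unpaused TIDs gets a chance
 * to aggregate, and both the TID and the AC are put back at the tail of
 * their queues if they still have frames pending, which yields a simple
 * round-robin across access categories and TIDs.
 */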
2635
2636/* Initialize per-node transmit state */
2637
2638void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2639{
672840ac 2640 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2641 struct ath_atx_tid *tid;
2642 struct ath_atx_ac *ac;
2643 int tidno, acno;
2644
ae5eb026 2645 an->maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
f078f209
LR
2646
2647 /*
2648 * Init per tid tx state
2649 */
2650 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2651 tidno < WME_NUM_TID;
2652 tidno++, tid++) {
2653 tid->an = an;
2654 tid->tidno = tidno;
2655 tid->seq_start = tid->seq_next = 0;
2656 tid->baw_size = WME_MAX_BA;
2657 tid->baw_head = tid->baw_tail = 0;
2658 tid->sched = false;
2659 tid->paused = false;
2660 tid->cleanup_inprogress = false;
2661 INIT_LIST_HEAD(&tid->buf_q);
2662
2663 acno = TID_TO_WME_AC(tidno);
2664 tid->ac = &an->an_aggr.tx.ac[acno];
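			/*
			 * TID_TO_WME_AC() follows the usual 802.11e
			 * UP-to-AC mapping, so e.g. TIDs 0 and 3 land on
			 * the BE queue while TIDs 6 and 7 land on VO.
			 */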
2665
2666 /* ADDBA state */
2667 tid->addba_exchangecomplete = 0;
2668 tid->addba_exchangeinprogress = 0;
2669 tid->addba_exchangeattempts = 0;
2670 }
2671
2672 /*
2673 * Init per ac tx state
2674 */
2675 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2676 acno < WME_NUM_AC; acno++, ac++) {
2677 ac->sched = false;
2678 INIT_LIST_HEAD(&ac->tid_q);
2679
2680 switch (acno) {
2681 case WME_AC_BE:
2682 ac->qnum = ath_tx_get_qnum(sc,
2683 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2684 break;
2685 case WME_AC_BK:
2686 ac->qnum = ath_tx_get_qnum(sc,
2687 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2688 break;
2689 case WME_AC_VI:
2690 ac->qnum = ath_tx_get_qnum(sc,
2691 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2692 break;
2693 case WME_AC_VO:
2694 ac->qnum = ath_tx_get_qnum(sc,
2695 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2696 break;
2697 }
2698 }
2699 }
2700}
2701
 2702/* Cleanup the pending buffers for the node. */
2703
2704void ath_tx_node_cleanup(struct ath_softc *sc,
2705 struct ath_node *an, bool bh_flag)
2706{
2707 int i;
2708 struct ath_atx_ac *ac, *ac_tmp;
2709 struct ath_atx_tid *tid, *tid_tmp;
2710 struct ath_txq *txq;
2711 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2712 if (ATH_TXQ_SETUP(sc, i)) {
2713 txq = &sc->sc_txq[i];
2714
2715 if (likely(bh_flag))
2716 spin_lock_bh(&txq->axq_lock);
2717 else
2718 spin_lock(&txq->axq_lock);
2719
2720 list_for_each_entry_safe(ac,
2721 ac_tmp, &txq->axq_acq, list) {
2722 tid = list_first_entry(&ac->tid_q,
2723 struct ath_atx_tid, list);
2724 if (tid && tid->an != an)
2725 continue;
2726 list_del(&ac->list);
2727 ac->sched = false;
2728
2729 list_for_each_entry_safe(tid,
2730 tid_tmp, &ac->tid_q, list) {
2731 list_del(&tid->list);
2732 tid->sched = false;
2733 ath_tid_drain(sc, txq, tid, bh_flag);
2734 tid->addba_exchangecomplete = 0;
2735 tid->addba_exchangeattempts = 0;
2736 tid->cleanup_inprogress = false;
2737 }
2738 }
2739
2740 if (likely(bh_flag))
2741 spin_unlock_bh(&txq->axq_lock);
2742 else
2743 spin_unlock(&txq->axq_lock);
2744 }
2745 }
2746}
2747
2748/* Cleanup per node transmit state */
2749
2750void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2751{
672840ac 2752 if (sc->sc_flags & SC_OP_TXAGGR) {
f078f209
LR
2753 struct ath_atx_tid *tid;
2754 int tidno, i;
2755
 2756 /* Verify that per tid tx state has been cleaned up */
2757 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2758 tidno < WME_NUM_TID;
2759 tidno++, tid++) {
2760
2761 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2762 ASSERT(tid->tx_buf[i] == NULL);
2763 }
2764 }
2765}
e022edbd
JM
2766
2767void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2768{
2769 int hdrlen, padsize;
2770 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2771 struct ath_tx_control txctl;
2772
2773 /*
2774 * As a temporary workaround, assign seq# here; this will likely need
2775 * to be cleaned up to work better with Beacon transmission and virtual
2776 * BSSes.
2777 */
2778 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2779 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2780 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2781 sc->seq_no += 0x10;
2782 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2783 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2784 }
2785
2786 /* Add the padding after the header if this is not already done */
2787 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2788 if (hdrlen & 3) {
2789 padsize = hdrlen % 4;
2790 if (skb_headroom(skb) < padsize) {
2791 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
2792 "failed\n", __func__);
2793 dev_kfree_skb_any(skb);
2794 return;
2795 }
2796 skb_push(skb, padsize);
2797 memmove(skb->data, skb->data + padsize, hdrlen);
2798 }
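
	/*
	 * Worked example for the padding above: a QoS data header is 26
	 * bytes, so hdrlen & 3 is non-zero, padsize = 26 % 4 = 2, and two
	 * bytes are pushed in front of the frame before the header is
	 * moved down, so that the frame body starts on a 4-byte boundary.
	 */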
2799
2800 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
2801 __func__,
2802 skb);
2803
2804 memset(&txctl, 0, sizeof(struct ath_tx_control));
2805 txctl.flags = ATH9K_TXDESC_CAB;
2806 if (ath_tx_prepare(sc, skb, &txctl) == 0) {
2807 /*
2808 * Start DMA mapping.
2809 * ath_tx_start_dma() will be called either synchronously
 2810 * or asynchronously once DMA is complete.
2811 */
2812 xmit_map_sg(sc, skb, &txctl);
2813 } else {
2814 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2815 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
2816 dev_kfree_skb_any(skb);
2817 }
2818}
2819