/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
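
/*
 * A brief note on the half-GI macros (illustrative, inferred from the
 * definitions above): with the short guard interval an OFDM symbol lasts
 * 3.6 us instead of 4 us, so SYMBOL_TIME_HALFGI() computes ns * 3.6
 * rounded up in integer math, e.g. 10 symbols -> (10 * 18 + 4) / 5 =
 * 36 us.  NUM_SYMBOLS_PER_USEC_HALFGI() approximates the inverse (how
 * many whole symbols fit in _usec microseconds).
 */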
33 | ||
34 | #define OFDM_SIFS_TIME 16 | |
35 | ||
36 | static u32 bits_per_symbol[][2] = { | |
37 | /* 20MHz 40MHz */ | |
38 | { 26, 54 }, /* 0: BPSK */ | |
39 | { 52, 108 }, /* 1: QPSK 1/2 */ | |
40 | { 78, 162 }, /* 2: QPSK 3/4 */ | |
41 | { 104, 216 }, /* 3: 16-QAM 1/2 */ | |
42 | { 156, 324 }, /* 4: 16-QAM 3/4 */ | |
43 | { 208, 432 }, /* 5: 64-QAM 2/3 */ | |
44 | { 234, 486 }, /* 6: 64-QAM 3/4 */ | |
45 | { 260, 540 }, /* 7: 64-QAM 5/6 */ | |
46 | { 52, 108 }, /* 8: BPSK */ | |
47 | { 104, 216 }, /* 9: QPSK 1/2 */ | |
48 | { 156, 324 }, /* 10: QPSK 3/4 */ | |
49 | { 208, 432 }, /* 11: 16-QAM 1/2 */ | |
50 | { 312, 648 }, /* 12: 16-QAM 3/4 */ | |
51 | { 416, 864 }, /* 13: 64-QAM 2/3 */ | |
52 | { 468, 972 }, /* 14: 64-QAM 3/4 */ | |
53 | { 520, 1080 }, /* 15: 64-QAM 5/6 */ | |
54 | }; | |
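
/*
 * For reference (a sanity check, not from the original source): each
 * entry is data bits per OFDM symbol, i.e. data subcarriers (52 at
 * 20 MHz, 108 at 40 MHz) times bits per subcarrier times coding rate,
 * doubled for the two-stream MCS 8-15.  E.g. MCS 7: 52 * 6 * 5/6 = 260.
 */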
55 | ||
56 | #define IS_HT_RATE(_rate) ((_rate) & 0x80) | |
57 | ||
f078f209 LR |
58 | /* |
59 | * Insert a chain of ath_buf (descriptors) on a txq and | |
60 | * assume the descriptors are already chained together by caller. | |
61 | * NB: must be called with txq lock held | |
62 | */ | |
63 | ||
102e0572 S |
64 | static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, |
65 | struct list_head *head) | |
f078f209 LR |
66 | { |
67 | struct ath_hal *ah = sc->sc_ah; | |
68 | struct ath_buf *bf; | |
102e0572 | 69 | |
f078f209 LR |
70 | /* |
71 | * Insert the frame on the outbound list and | |
72 | * pass it on to the hardware. | |
73 | */ | |
74 | ||
75 | if (list_empty(head)) | |
76 | return; | |
77 | ||
78 | bf = list_first_entry(head, struct ath_buf, list); | |
79 | ||
80 | list_splice_tail_init(head, &txq->axq_q); | |
81 | txq->axq_depth++; | |
82 | txq->axq_totalqueued++; | |
83 | txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); | |
84 | ||
85 | DPRINTF(sc, ATH_DBG_QUEUE, | |
86 | "%s: txq depth = %d\n", __func__, txq->axq_depth); | |
87 | ||
	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: TXDP[%u] = %llx (%p)\n",
			__func__, txq->axq_qnum,
			ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
			__func__,
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_xmit_status *tx_status)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	DPRINTF(sc, ATH_DBG_XMIT,
		"%s: TX complete: skb: %p\n", __func__, skb);

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
	    tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		kfree(tx_info_priv);
		tx_info->rate_driver_data[0] = NULL;
	}

	if (tx_status->flags & ATH_TX_BAR) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		tx_status->flags &= ~ATH_TX_BAR;
	}

	if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	tx_info->status.rates[0].count = tx_status->retries + 1;

	ieee80211_tx_status(hw, skb);
}
135 | ||
f078f209 LR |
136 | /* Check if it's okay to send out aggregates */ |
137 | ||
a37c2c79 | 138 | static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno) |
f078f209 LR |
139 | { |
140 | struct ath_atx_tid *tid; | |
141 | tid = ATH_AN_2_TID(an, tidno); | |
142 | ||
a37c2c79 S |
143 | if (tid->state & AGGR_ADDBA_COMPLETE || |
144 | tid->state & AGGR_ADDBA_PROGRESS) | |
f078f209 LR |
145 | return 1; |
146 | else | |
147 | return 0; | |
148 | } | |
149 | ||
528f0c6b S |
150 | /* Calculate Atheros packet type from IEEE80211 packet header */ |
151 | ||
152 | static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) | |
f078f209 | 153 | { |
528f0c6b | 154 | struct ieee80211_hdr *hdr; |
f078f209 LR |
155 | enum ath9k_pkt_type htype; |
156 | __le16 fc; | |
157 | ||
528f0c6b | 158 | hdr = (struct ieee80211_hdr *)skb->data; |
f078f209 LR |
159 | fc = hdr->frame_control; |
160 | ||
f078f209 LR |
161 | if (ieee80211_is_beacon(fc)) |
162 | htype = ATH9K_PKT_TYPE_BEACON; | |
163 | else if (ieee80211_is_probe_resp(fc)) | |
164 | htype = ATH9K_PKT_TYPE_PROBE_RESP; | |
165 | else if (ieee80211_is_atim(fc)) | |
166 | htype = ATH9K_PKT_TYPE_ATIM; | |
167 | else if (ieee80211_is_pspoll(fc)) | |
168 | htype = ATH9K_PKT_TYPE_PSPOLL; | |
169 | else | |
170 | htype = ATH9K_PKT_TYPE_NORMAL; | |
171 | ||
172 | return htype; | |
173 | } | |
174 | ||
a8efee4f | 175 | static bool is_pae(struct sk_buff *skb) |
f078f209 LR |
176 | { |
177 | struct ieee80211_hdr *hdr; | |
f078f209 LR |
178 | __le16 fc; |
179 | ||
180 | hdr = (struct ieee80211_hdr *)skb->data; | |
181 | fc = hdr->frame_control; | |
e6a9854b | 182 | |
a8efee4f | 183 | if (ieee80211_is_data(fc)) { |
f078f209 | 184 | if (ieee80211_is_nullfunc(fc) || |
528f0c6b S |
185 | /* Port Access Entity (IEEE 802.1X) */ |
186 | (skb->protocol == cpu_to_be16(ETH_P_PAE))) { | |
a8efee4f | 187 | return true; |
f078f209 | 188 | } |
f078f209 LR |
189 | } |
190 | ||
a8efee4f | 191 | return false; |
f078f209 LR |
192 | } |
193 | ||
528f0c6b | 194 | static int get_hw_crypto_keytype(struct sk_buff *skb) |
f078f209 | 195 | { |
f078f209 | 196 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
f078f209 LR |
197 | |
198 | if (tx_info->control.hw_key) { | |
d0be7cc7 | 199 | if (tx_info->control.hw_key->alg == ALG_WEP) |
528f0c6b | 200 | return ATH9K_KEY_TYPE_WEP; |
d0be7cc7 | 201 | else if (tx_info->control.hw_key->alg == ALG_TKIP) |
528f0c6b | 202 | return ATH9K_KEY_TYPE_TKIP; |
d0be7cc7 | 203 | else if (tx_info->control.hw_key->alg == ALG_CCMP) |
528f0c6b | 204 | return ATH9K_KEY_TYPE_AES; |
f078f209 LR |
205 | } |
206 | ||
528f0c6b S |
207 | return ATH9K_KEY_TYPE_CLEAR; |
208 | } | |

/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !is_pae(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * IEEE80211 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}

static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->sc_txbuflock);

	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->sc_txbuflock);

	return bf;
}
293 | ||
294 | /* To complete a chain of buffers associated a frame */ | |
295 | ||
296 | static void ath_tx_complete_buf(struct ath_softc *sc, | |
297 | struct ath_buf *bf, | |
298 | struct list_head *bf_q, | |
299 | int txok, int sendbar) | |
300 | { | |
301 | struct sk_buff *skb = bf->bf_mpdu; | |
302 | struct ath_xmit_status tx_status; | |
f078f209 LR |
303 | |
304 | /* | |
305 | * Set retry information. | |
306 | * NB: Don't use the information in the descriptor, because the frame | |
307 | * could be software retried. | |
308 | */ | |
309 | tx_status.retries = bf->bf_retries; | |
310 | tx_status.flags = 0; | |
311 | ||
312 | if (sendbar) | |
313 | tx_status.flags = ATH_TX_BAR; | |
314 | ||
315 | if (!txok) { | |
316 | tx_status.flags |= ATH_TX_ERROR; | |
317 | ||
cd3d39a6 | 318 | if (bf_isxretried(bf)) |
f078f209 LR |
319 | tx_status.flags |= ATH_TX_XRETRY; |
320 | } | |
102e0572 | 321 | |
f078f209 | 322 | /* Unmap this frame */ |
f078f209 | 323 | pci_unmap_single(sc->pdev, |
ff9b662d | 324 | bf->bf_dmacontext, |
f078f209 LR |
325 | skb->len, |
326 | PCI_DMA_TODEVICE); | |
327 | /* complete this frame */ | |
528f0c6b | 328 | ath_tx_complete(sc, skb, &tx_status); |
f078f209 LR |
329 | |
330 | /* | |
331 | * Return the list of ath_buf of this mpdu to free queue | |
332 | */ | |
333 | spin_lock_bh(&sc->sc_txbuflock); | |
334 | list_splice_tail_init(bf_q, &sc->sc_txbuf); | |
335 | spin_unlock_bh(&sc->sc_txbuflock); | |
336 | } | |
337 | ||
338 | /* | |
339 | * queue up a dest/ac pair for tx scheduling | |
340 | * NB: must be called with txq lock held | |
341 | */ | |
342 | ||
343 | static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) | |
344 | { | |
345 | struct ath_atx_ac *ac = tid->ac; | |
346 | ||
347 | /* | |
348 | * if tid is paused, hold off | |
349 | */ | |
350 | if (tid->paused) | |
351 | return; | |
352 | ||
353 | /* | |
354 | * add tid to ac atmost once | |
355 | */ | |
356 | if (tid->sched) | |
357 | return; | |
358 | ||
359 | tid->sched = true; | |
360 | list_add_tail(&tid->list, &ac->tid_q); | |
361 | ||
362 | /* | |
363 | * add node ac to txq atmost once | |
364 | */ | |
365 | if (ac->sched) | |
366 | return; | |
367 | ||
368 | ac->sched = true; | |
369 | list_add_tail(&ac->list, &txq->axq_acq); | |
370 | } | |
371 | ||
372 | /* pause a tid */ | |
373 | ||
374 | static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |
375 | { | |
376 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | |
377 | ||
378 | spin_lock_bh(&txq->axq_lock); | |
379 | ||
380 | tid->paused++; | |
381 | ||
382 | spin_unlock_bh(&txq->axq_lock); | |
383 | } | |
384 | ||
385 | /* resume a tid and schedule aggregate */ | |
386 | ||
387 | void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |
388 | { | |
389 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | |
390 | ||
391 | ASSERT(tid->paused > 0); | |
392 | spin_lock_bh(&txq->axq_lock); | |
393 | ||
394 | tid->paused--; | |
395 | ||
396 | if (tid->paused > 0) | |
397 | goto unlock; | |
398 | ||
399 | if (list_empty(&tid->buf_q)) | |
400 | goto unlock; | |
401 | ||
402 | /* | |
403 | * Add this TID to scheduler and try to send out aggregates | |
404 | */ | |
405 | ath_tx_queue_tid(txq, tid); | |
406 | ath_txq_schedule(sc, txq); | |
407 | unlock: | |
408 | spin_unlock_bh(&txq->axq_lock); | |
409 | } | |
410 | ||
411 | /* Compute the number of bad frames */ | |
412 | ||
b5aa9bf9 S |
413 | static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, |
414 | int txok) | |
f078f209 | 415 | { |
f078f209 LR |
416 | struct ath_buf *bf_last = bf->bf_lastbf; |
417 | struct ath_desc *ds = bf_last->bf_desc; | |
418 | u16 seq_st = 0; | |
419 | u32 ba[WME_BA_BMP_SIZE >> 5]; | |
420 | int ba_index; | |
421 | int nbad = 0; | |
422 | int isaggr = 0; | |
423 | ||
b5aa9bf9 | 424 | if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) |
f078f209 LR |
425 | return 0; |
426 | ||
cd3d39a6 | 427 | isaggr = bf_isaggr(bf); |
f078f209 LR |
428 | if (isaggr) { |
429 | seq_st = ATH_DS_BA_SEQ(ds); | |
430 | memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); | |
431 | } | |
432 | ||
433 | while (bf) { | |
434 | ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); | |
435 | if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) | |
436 | nbad++; | |
437 | ||
438 | bf = bf->bf_next; | |
439 | } | |
440 | ||
441 | return nbad; | |
442 | } | |
443 | ||
444 | static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) | |
445 | { | |
446 | struct sk_buff *skb; | |
447 | struct ieee80211_hdr *hdr; | |
448 | ||
cd3d39a6 | 449 | bf->bf_state.bf_type |= BUF_RETRY; |
f078f209 LR |
450 | bf->bf_retries++; |
451 | ||
452 | skb = bf->bf_mpdu; | |
453 | hdr = (struct ieee80211_hdr *)skb->data; | |
454 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); | |
455 | } | |
456 | ||
457 | /* Update block ack window */ | |
458 | ||
102e0572 S |
459 | static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
460 | int seqno) | |
f078f209 LR |
461 | { |
462 | int index, cindex; | |
463 | ||
464 | index = ATH_BA_INDEX(tid->seq_start, seqno); | |
465 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | |
466 | ||
467 | tid->tx_buf[cindex] = NULL; | |
468 | ||
469 | while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { | |
470 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | |
471 | INCR(tid->baw_head, ATH_TID_MAX_BUFS); | |
472 | } | |
473 | } | |
474 | ||
475 | /* | |
476 | * ath_pkt_dur - compute packet duration (NB: not NAV) | |
477 | * | |
478 | * rix - rate index | |
479 | * pktlen - total bytes (delims + data + fcs + pads + pad delims) | |
480 | * width - 0 for 20 MHz, 1 for 40 MHz | |
481 | * half_gi - to use 4us v/s 3.6 us for symbol time | |
482 | */ | |
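
/*
 * Worked example (illustrative, not part of the original source): a
 * 1500-byte frame at MCS 7, 20 MHz, full GI gives
 * nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so nsymbols = 47
 * and the data portion lasts 47 * 4 = 188 us; adding the 36 us of
 * training/signal fields (L_STF + L_LTF + L_SIG + HT_SIG + HT_STF +
 * HT_LTF(1)) yields 224 us.
 */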
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rate_table->info[rix].ratecode;

	/* for legacy rates, use old function to compute packet duration */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
					      rix, shortPreamble);

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
515 | ||
516 | /* Rate module function to set rate related fields in tx descriptor */ | |
517 | ||
518 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) | |
519 | { | |
520 | struct ath_hal *ah = sc->sc_ah; | |
e63835b0 | 521 | struct ath_rate_table *rt; |
f078f209 LR |
522 | struct ath_desc *ds = bf->bf_desc; |
523 | struct ath_desc *lastds = bf->bf_lastbf->bf_desc; | |
524 | struct ath9k_11n_rate_series series[4]; | |
528f0c6b S |
525 | struct ath_node *an = NULL; |
526 | struct sk_buff *skb; | |
527 | struct ieee80211_tx_info *tx_info; | |
a8efee4f | 528 | struct ieee80211_tx_rate *rates; |
e63835b0 S |
529 | struct ieee80211_hdr *hdr; |
530 | int i, flags, rtsctsena = 0; | |
531 | u32 ctsduration = 0; | |
532 | u8 rix = 0, cix, ctsrate = 0; | |
533 | __le16 fc; | |
534 | ||
535 | memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); | |
528f0c6b S |
536 | |
537 | skb = (struct sk_buff *)bf->bf_mpdu; | |
e63835b0 S |
538 | hdr = (struct ieee80211_hdr *)skb->data; |
539 | fc = hdr->frame_control; | |
528f0c6b | 540 | tx_info = IEEE80211_SKB_CB(skb); |
e63835b0 | 541 | rates = tx_info->control.rates; |
528f0c6b S |
542 | |
543 | if (tx_info->control.sta) | |
544 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | |
f078f209 | 545 | |
e63835b0 S |
546 | if (ieee80211_has_morefrags(fc) || |
547 | (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { | |
548 | rates[1].count = rates[2].count = rates[3].count = 0; | |
549 | rates[1].idx = rates[2].idx = rates[3].idx = 0; | |
550 | rates[0].count = ATH_TXMAXTRY; | |
551 | } | |
552 | ||
553 | /* get the cix for the lowest valid rix */ | |
554 | rt = sc->hw_rate_table[sc->sc_curmode]; | |
a8efee4f | 555 | for (i = 3; i >= 0; i--) { |
e63835b0 | 556 | if (rates[i].count && (rates[i].idx >= 0)) { |
a8efee4f | 557 | rix = rates[i].idx; |
f078f209 LR |
558 | break; |
559 | } | |
560 | } | |
e63835b0 | 561 | |
f078f209 | 562 | flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); |
e63835b0 | 563 | cix = rt->info[rix].ctrl_rate; |
f078f209 LR |
564 | |
565 | /* | |
e63835b0 S |
566 | * If 802.11g protection is enabled, determine whether to use RTS/CTS or |
567 | * just CTS. Note that this is only done for OFDM/HT unicast frames. | |
f078f209 | 568 | */ |
e63835b0 | 569 | if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK) |
46d14a58 | 570 | && (rt->info[rix].phy == WLAN_RC_PHY_OFDM || |
e63835b0 | 571 | WLAN_RC_PHY_HT(rt->info[rix].phy))) { |
f078f209 LR |
572 | if (sc->sc_protmode == PROT_M_RTSCTS) |
573 | flags = ATH9K_TXDESC_RTSENA; | |
574 | else if (sc->sc_protmode == PROT_M_CTSONLY) | |
575 | flags = ATH9K_TXDESC_CTSENA; | |
576 | ||
e63835b0 | 577 | cix = rt->info[sc->sc_protrix].ctrl_rate; |
f078f209 LR |
578 | rtsctsena = 1; |
579 | } | |
580 | ||
e63835b0 S |
581 | /* For 11n, the default behavior is to enable RTS for hw retried frames. |
582 | * We enable the global flag here and let rate series flags determine | |
583 | * which rates will actually use RTS. | |
f078f209 | 584 | */ |
cd3d39a6 | 585 | if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) { |
e63835b0 | 586 | /* 802.11g protection not needed, use our default behavior */ |
f078f209 LR |
587 | if (!rtsctsena) |
588 | flags = ATH9K_TXDESC_RTSENA; | |
f078f209 LR |
589 | } |
590 | ||
e63835b0 | 591 | /* Set protection if aggregate protection on */ |
f078f209 | 592 | if (sc->sc_config.ath_aggr_prot && |
cd3d39a6 | 593 | (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) { |
f078f209 | 594 | flags = ATH9K_TXDESC_RTSENA; |
e63835b0 | 595 | cix = rt->info[sc->sc_protrix].ctrl_rate; |
f078f209 LR |
596 | rtsctsena = 1; |
597 | } | |
598 | ||
e63835b0 S |
599 | /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ |
600 | if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit)) | |
f078f209 | 601 | flags &= ~(ATH9K_TXDESC_RTSENA); |
f078f209 LR |
602 | |
603 | /* | |
e63835b0 S |
604 | * CTS transmit rate is derived from the transmit rate by looking in the |
605 | * h/w rate table. We must also factor in whether or not a short | |
606 | * preamble is to be used. NB: cix is set above where RTS/CTS is enabled | |
f078f209 | 607 | */ |
e63835b0 S |
608 | ctsrate = rt->info[cix].ratecode | |
609 | (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0); | |
f078f209 LR |
610 | |
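	/*
	 * Fill in the four-entry multi-rate-retry series: the hardware
	 * walks series 0..3 in order and transmits at each rate for up to
	 * Tries attempts (a summary of the loop below).
	 */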
	for (i = 0; i < 4; i++) {
		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].ratecode |
			(bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		if (bf_isht(bf) && an)
			series[i].ChSel = ath_chainmask_sel_logic(sc, an);
		else
			series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
				     ctsrate, ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
652 | ||
653 | /* | |
654 | * Function to send a normal HT (non-AMPDU) frame | |
655 | * NB: must be called with txq lock held | |
656 | */ | |
f078f209 LR |
657 | static int ath_tx_send_normal(struct ath_softc *sc, |
658 | struct ath_txq *txq, | |
659 | struct ath_atx_tid *tid, | |
660 | struct list_head *bf_head) | |
661 | { | |
662 | struct ath_buf *bf; | |
f078f209 LR |
663 | |
664 | BUG_ON(list_empty(bf_head)); | |
665 | ||
666 | bf = list_first_entry(bf_head, struct ath_buf, list); | |
cd3d39a6 | 667 | bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */ |
f078f209 | 668 | |
f078f209 LR |
669 | /* update starting sequence number for subsequent ADDBA request */ |
670 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | |
671 | ||
672 | /* Queue to h/w without aggregation */ | |
673 | bf->bf_nframes = 1; | |
674 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | |
675 | ath_buf_set_rate(sc, bf); | |
676 | ath_tx_txqaddbuf(sc, txq, bf_head); | |
677 | ||
678 | return 0; | |
679 | } | |
680 | ||
681 | /* flush tid's software queue and send frames as non-ampdu's */ | |
682 | ||
683 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |
684 | { | |
685 | struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; | |
686 | struct ath_buf *bf; | |
687 | struct list_head bf_head; | |
688 | INIT_LIST_HEAD(&bf_head); | |
689 | ||
690 | ASSERT(tid->paused > 0); | |
691 | spin_lock_bh(&txq->axq_lock); | |
692 | ||
693 | tid->paused--; | |
694 | ||
695 | if (tid->paused > 0) { | |
696 | spin_unlock_bh(&txq->axq_lock); | |
697 | return; | |
698 | } | |
699 | ||
700 | while (!list_empty(&tid->buf_q)) { | |
701 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | |
cd3d39a6 | 702 | ASSERT(!bf_isretried(bf)); |
f078f209 LR |
703 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); |
704 | ath_tx_send_normal(sc, txq, tid, &bf_head); | |
705 | } | |
706 | ||
707 | spin_unlock_bh(&txq->axq_lock); | |
708 | } | |
709 | ||
710 | /* Completion routine of an aggregate */ | |
711 | ||
712 | static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, | |
713 | struct ath_txq *txq, | |
714 | struct ath_buf *bf, | |
715 | struct list_head *bf_q, | |
716 | int txok) | |
717 | { | |
528f0c6b S |
718 | struct ath_node *an = NULL; |
719 | struct sk_buff *skb; | |
720 | struct ieee80211_tx_info *tx_info; | |
721 | struct ath_atx_tid *tid = NULL; | |
f078f209 LR |
722 | struct ath_buf *bf_last = bf->bf_lastbf; |
723 | struct ath_desc *ds = bf_last->bf_desc; | |
724 | struct ath_buf *bf_next, *bf_lastq = NULL; | |
725 | struct list_head bf_head, bf_pending; | |
726 | u16 seq_st = 0; | |
727 | u32 ba[WME_BA_BMP_SIZE >> 5]; | |
728 | int isaggr, txfail, txpending, sendbar = 0, needreset = 0; | |
f078f209 | 729 | |
528f0c6b S |
730 | skb = (struct sk_buff *)bf->bf_mpdu; |
731 | tx_info = IEEE80211_SKB_CB(skb); | |
732 | ||
733 | if (tx_info->control.sta) { | |
734 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | |
735 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | |
736 | } | |
737 | ||
cd3d39a6 | 738 | isaggr = bf_isaggr(bf); |
f078f209 LR |
739 | if (isaggr) { |
740 | if (txok) { | |
741 | if (ATH_DS_TX_BA(ds)) { | |
742 | /* | |
743 | * extract starting sequence and | |
744 | * block-ack bitmap | |
745 | */ | |
746 | seq_st = ATH_DS_BA_SEQ(ds); | |
747 | memcpy(ba, | |
748 | ATH_DS_BA_BITMAP(ds), | |
749 | WME_BA_BMP_SIZE >> 3); | |
750 | } else { | |
0345f37b | 751 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
f078f209 LR |
752 | |
753 | /* | |
754 | * AR5416 can become deaf/mute when BA | |
755 | * issue happens. Chip needs to be reset. | |
756 | * But AP code may have sychronization issues | |
757 | * when perform internal reset in this routine. | |
758 | * Only enable reset in STA mode for now. | |
759 | */ | |
b4696c8b | 760 | if (sc->sc_ah->ah_opmode == ATH9K_M_STA) |
f078f209 LR |
761 | needreset = 1; |
762 | } | |
763 | } else { | |
0345f37b | 764 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
f078f209 LR |
765 | } |
766 | } | |
767 | ||
768 | INIT_LIST_HEAD(&bf_pending); | |
769 | INIT_LIST_HEAD(&bf_head); | |
770 | ||
771 | while (bf) { | |
772 | txfail = txpending = 0; | |
773 | bf_next = bf->bf_next; | |
774 | ||
775 | if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { | |
776 | /* transmit completion, subframe is | |
777 | * acked by block ack */ | |
778 | } else if (!isaggr && txok) { | |
779 | /* transmit completion */ | |
780 | } else { | |
781 | ||
a37c2c79 | 782 | if (!(tid->state & AGGR_CLEANUP) && |
f078f209 LR |
783 | ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { |
784 | if (bf->bf_retries < ATH_MAX_SW_RETRIES) { | |
785 | ath_tx_set_retry(sc, bf); | |
786 | txpending = 1; | |
787 | } else { | |
cd3d39a6 | 788 | bf->bf_state.bf_type |= BUF_XRETRY; |
f078f209 LR |
789 | txfail = 1; |
790 | sendbar = 1; | |
791 | } | |
792 | } else { | |
793 | /* | |
794 | * cleanup in progress, just fail | |
795 | * the un-acked sub-frames | |
796 | */ | |
797 | txfail = 1; | |
798 | } | |
799 | } | |
800 | /* | |
801 | * Remove ath_buf's of this sub-frame from aggregate queue. | |
802 | */ | |
803 | if (bf_next == NULL) { /* last subframe in the aggregate */ | |
804 | ASSERT(bf->bf_lastfrm == bf_last); | |
805 | ||
806 | /* | |
807 | * The last descriptor of the last sub frame could be | |
808 | * a holding descriptor for h/w. If that's the case, | |
809 | * bf->bf_lastfrm won't be in the bf_q. | |
810 | * Make sure we handle bf_q properly here. | |
811 | */ | |
812 | ||
813 | if (!list_empty(bf_q)) { | |
814 | bf_lastq = list_entry(bf_q->prev, | |
815 | struct ath_buf, list); | |
816 | list_cut_position(&bf_head, | |
817 | bf_q, &bf_lastq->list); | |
818 | } else { | |
819 | /* | |
820 | * XXX: if the last subframe only has one | |
821 | * descriptor which is also being used as | |
822 | * a holding descriptor. Then the ath_buf | |
823 | * is not in the bf_q at all. | |
824 | */ | |
825 | INIT_LIST_HEAD(&bf_head); | |
826 | } | |
827 | } else { | |
828 | ASSERT(!list_empty(bf_q)); | |
829 | list_cut_position(&bf_head, | |
830 | bf_q, &bf->bf_lastfrm->list); | |
831 | } | |
832 | ||
833 | if (!txpending) { | |
834 | /* | |
835 | * complete the acked-ones/xretried ones; update | |
836 | * block-ack window | |
837 | */ | |
838 | spin_lock_bh(&txq->axq_lock); | |
839 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | |
840 | spin_unlock_bh(&txq->axq_lock); | |
841 | ||
842 | /* complete this sub-frame */ | |
843 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); | |
844 | } else { | |
845 | /* | |
846 | * retry the un-acked ones | |
847 | */ | |
848 | /* | |
849 | * XXX: if the last descriptor is holding descriptor, | |
850 | * in order to requeue the frame to software queue, we | |
851 | * need to allocate a new descriptor and | |
852 | * copy the content of holding descriptor to it. | |
853 | */ | |
854 | if (bf->bf_next == NULL && | |
855 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { | |
856 | struct ath_buf *tbf; | |
857 | ||
858 | /* allocate new descriptor */ | |
859 | spin_lock_bh(&sc->sc_txbuflock); | |
860 | ASSERT(!list_empty((&sc->sc_txbuf))); | |
861 | tbf = list_first_entry(&sc->sc_txbuf, | |
862 | struct ath_buf, list); | |
863 | list_del(&tbf->list); | |
864 | spin_unlock_bh(&sc->sc_txbuflock); | |
865 | ||
866 | ATH_TXBUF_RESET(tbf); | |
867 | ||
868 | /* copy descriptor content */ | |
869 | tbf->bf_mpdu = bf_last->bf_mpdu; | |
f078f209 LR |
870 | tbf->bf_buf_addr = bf_last->bf_buf_addr; |
871 | *(tbf->bf_desc) = *(bf_last->bf_desc); | |
872 | ||
873 | /* link it to the frame */ | |
874 | if (bf_lastq) { | |
875 | bf_lastq->bf_desc->ds_link = | |
876 | tbf->bf_daddr; | |
877 | bf->bf_lastfrm = tbf; | |
878 | ath9k_hw_cleartxdesc(sc->sc_ah, | |
879 | bf->bf_lastfrm->bf_desc); | |
880 | } else { | |
881 | tbf->bf_state = bf_last->bf_state; | |
882 | tbf->bf_lastfrm = tbf; | |
883 | ath9k_hw_cleartxdesc(sc->sc_ah, | |
884 | tbf->bf_lastfrm->bf_desc); | |
885 | ||
886 | /* copy the DMA context */ | |
ff9b662d S |
887 | tbf->bf_dmacontext = |
888 | bf_last->bf_dmacontext; | |
f078f209 LR |
889 | } |
890 | list_add_tail(&tbf->list, &bf_head); | |
891 | } else { | |
892 | /* | |
893 | * Clear descriptor status words for | |
894 | * software retry | |
895 | */ | |
896 | ath9k_hw_cleartxdesc(sc->sc_ah, | |
ff9b662d | 897 | bf->bf_lastfrm->bf_desc); |
f078f209 LR |
898 | } |
899 | ||
900 | /* | |
901 | * Put this buffer to the temporary pending | |
902 | * queue to retain ordering | |
903 | */ | |
904 | list_splice_tail_init(&bf_head, &bf_pending); | |
905 | } | |
906 | ||
907 | bf = bf_next; | |
908 | } | |
909 | ||
a37c2c79 | 910 | if (tid->state & AGGR_CLEANUP) { |
f078f209 LR |
911 | /* check to see if we're done with cleaning the h/w queue */ |
912 | spin_lock_bh(&txq->axq_lock); | |
913 | ||
914 | if (tid->baw_head == tid->baw_tail) { | |
a37c2c79 | 915 | tid->state &= ~AGGR_ADDBA_COMPLETE; |
f078f209 LR |
916 | tid->addba_exchangeattempts = 0; |
917 | spin_unlock_bh(&txq->axq_lock); | |
918 | ||
a37c2c79 | 919 | tid->state &= ~AGGR_CLEANUP; |
f078f209 LR |
920 | |
921 | /* send buffered frames as singles */ | |
922 | ath_tx_flush_tid(sc, tid); | |
923 | } else | |
924 | spin_unlock_bh(&txq->axq_lock); | |
925 | ||
926 | return; | |
927 | } | |
928 | ||
929 | /* | |
930 | * prepend un-acked frames to the beginning of the pending frame queue | |
931 | */ | |
932 | if (!list_empty(&bf_pending)) { | |
933 | spin_lock_bh(&txq->axq_lock); | |
934 | /* Note: we _prepend_, we _do_not_ at to | |
935 | * the end of the queue ! */ | |
936 | list_splice(&bf_pending, &tid->buf_q); | |
937 | ath_tx_queue_tid(txq, tid); | |
938 | spin_unlock_bh(&txq->axq_lock); | |
939 | } | |
940 | ||
941 | if (needreset) | |
f45144ef | 942 | ath_reset(sc, false); |
f078f209 LR |
943 | |
944 | return; | |
945 | } | |

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			       sizeof(tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
		}
	}
}

/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	int txok, nbad = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* FIXME:
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
					struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}

		ath_tx_rc_status(bf, ds, nbad);

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}

		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
		__func__, txq->axq_qnum,
		ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}

/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset hardware; hal status %u\n",
				__func__,
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}
1161 | /* Add a sub-frame to block ack window */ | |
1162 | ||
1163 | static void ath_tx_addto_baw(struct ath_softc *sc, | |
1164 | struct ath_atx_tid *tid, | |
1165 | struct ath_buf *bf) | |
1166 | { | |
1167 | int index, cindex; | |
1168 | ||
cd3d39a6 | 1169 | if (bf_isretried(bf)) |
f078f209 LR |
1170 | return; |
1171 | ||
1172 | index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); | |
1173 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | |
1174 | ||
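	/*
	 * tx_buf acts as a circular window over the BAW; the masking above
	 * relies on ATH_TID_MAX_BUFS being a power of two.
	 */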
	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}

/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}

/*
 * Look up the rate and return the aggregate limit based on the lowest
 * rate in the series.
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
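	/*
	 * (Illustrative aside, not from the original source: at MCS 15 /
	 * 40 MHz a 4 ms burst is 1000 symbols * 540 bits = ~67.5 KB, which
	 * is why the result is further clamped below by ATH_AMPDU_LIMIT_*
	 * and the 16-bit hardware length field.)
	 */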
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
			 (u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}

/*
 * Returns the number of delimiters to be added to meet the minimum
 * required mpdudensity. The caller should make sure that the rate is
 * an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	struct ath_rate_table *rt = sc->hw_rate_table[sc->sc_curmode];
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 */
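	/*
	 * Worked example (illustrative, not from the original source): a
	 * density of 8 us at MCS 7, 20 MHz, full GI gives
	 * nsymbols = 8 >> 2 = 2 and nsymbits = 260, so
	 * minlen = 2 * 260 / 8 = 65 bytes; shorter subframes get extra
	 * delimiters below.
	 */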
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].ratecode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}

/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
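	/*
	 * Each subframe in an A-MPDU is padded out to a 4-byte boundary;
	 * PADBYTES() gives the pad for a given length, e.g.
	 * PADBYTES(1437) = 3 (an illustrative value).
	 */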
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
				tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;
	return status;
#undef PADBYTES
}
1495 | ||
1496 | /* | |
1497 | * process pending frames possibly doing a-mpdu aggregation | |
1498 | * NB: must be called with txq lock held | |
1499 | */ | |
1500 | ||
1501 | static void ath_tx_sched_aggr(struct ath_softc *sc, | |
1502 | struct ath_txq *txq, struct ath_atx_tid *tid) | |
1503 | { | |
1504 | struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; | |
1505 | enum ATH_AGGR_STATUS status; | |
1506 | struct list_head bf_q; | |
1507 | struct aggr_rifs_param param = {0, 0, 0, 0, NULL}; | |
1508 | int prev_frames = 0; | |
1509 | ||
1510 | do { | |
1511 | if (list_empty(&tid->buf_q)) | |
1512 | return; | |
1513 | ||
1514 | INIT_LIST_HEAD(&bf_q); | |
1515 | ||
1516 | status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, ¶m, | |
1517 | &prev_frames); | |
1518 | ||
1519 | /* | |
1520 | * no frames picked up to be aggregated; block-ack | |
1521 | * window is not open | |
1522 | */ | |
1523 | if (list_empty(&bf_q)) | |
1524 | break; | |
1525 | ||
1526 | bf = list_first_entry(&bf_q, struct ath_buf, list); | |
1527 | bf_last = list_entry(bf_q.prev, struct ath_buf, list); | |
1528 | bf->bf_lastbf = bf_last; | |
1529 | ||
1530 | /* | |
1531 | * if only one frame, send as non-aggregate | |
1532 | */ | |
1533 | if (bf->bf_nframes == 1) { | |
1534 | ASSERT(bf->bf_lastfrm == bf_last); | |
1535 | ||
cd3d39a6 | 1536 | bf->bf_state.bf_type &= ~BUF_AGGR; |
f078f209 LR |
1537 | /* |
1538 | * clear aggr bits for every descriptor | |
1539 | * XXX TODO: is there a way to optimize it? | |
1540 | */ | |
1541 | list_for_each_entry(tbf, &bf_q, list) { | |
1542 | ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc); | |
1543 | } | |
1544 | ||
1545 | ath_buf_set_rate(sc, bf); | |
1546 | ath_tx_txqaddbuf(sc, txq, &bf_q); | |
1547 | continue; | |
1548 | } | |
1549 | ||
1550 | /* | |
1551 | * setup first desc with rate and aggr info | |
1552 | */ | |
cd3d39a6 | 1553 | bf->bf_state.bf_type |= BUF_AGGR; |
f078f209 LR |
1554 | ath_buf_set_rate(sc, bf); |
1555 | ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); | |
1556 | ||
1557 | /* | |
1558 | * anchor last frame of aggregate correctly | |
1559 | */ | |
1560 | ASSERT(bf_lastaggr); | |
1561 | ASSERT(bf_lastaggr->bf_lastfrm == bf_last); | |
1562 | tbf = bf_lastaggr; | |
1563 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | |
1564 | ||
1565 | /* XXX: We don't enter into this loop, consider removing this */ | |
1566 | while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) { | |
1567 | tbf = list_entry(tbf->list.next, struct ath_buf, list); | |
1568 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | |
1569 | } | |
1570 | ||
1571 | txq->axq_aggr_depth++; | |
1572 | ||
1573 | /* | |
1574 | * Normal aggregate, queue to hardware | |
1575 | */ | |
1576 | ath_tx_txqaddbuf(sc, txq, &bf_q); | |
1577 | ||
1578 | } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && | |
1579 | status != ATH_AGGR_BAW_CLOSED); | |
1580 | } | |
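/*
 * Illustrative sketch (not part of the driver): the scheduling loop
 * above keeps forming aggregates until either the hardware queue is
 * deep enough or the block-ack window closes. A hypothetical predicate
 * capturing the loop's continue condition could read:
 */
#if 0
static bool ath_aggr_keep_scheduling(struct ath_txq *txq,
				     enum ATH_AGGR_STATUS status)
{
	/* stop once enough frames are queued or the BAW is closed */
	return txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
	       status != ATH_AGGR_BAW_CLOSED;
}
#endif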
1581 | ||
1582 | /* Called with txq lock held */ | |
1583 | ||
1584 | static void ath_tid_drain(struct ath_softc *sc, | |
1585 | struct ath_txq *txq, | |
b5aa9bf9 S |
1586 | struct ath_atx_tid *tid) |
1587 | ||
f078f209 LR |
1588 | { |
1589 | struct ath_buf *bf; | |
1590 | struct list_head bf_head; | |
1591 | INIT_LIST_HEAD(&bf_head); | |
1592 | ||
1593 | for (;;) { | |
1594 | if (list_empty(&tid->buf_q)) | |
1595 | break; | |
1596 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | |
1597 | ||
1598 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | |
1599 | ||
1600 | /* update baw for software retried frame */ | |
cd3d39a6 | 1601 | if (bf_isretried(bf)) |
f078f209 LR |
1602 | ath_tx_update_baw(sc, tid, bf->bf_seqno); |
1603 | ||
1604 | /* | |
1605 | * do not indicate packets while holding txq spinlock. | |
1606 | * unlock is intentional here | |
1607 | */ | |
b5aa9bf9 | 1608 | spin_unlock(&txq->axq_lock); |
f078f209 LR |
1609 | |
1610 | /* complete this sub-frame */ | |
1611 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | |
1612 | ||
b5aa9bf9 | 1613 | spin_lock(&txq->axq_lock); |
f078f209 LR |
1614 | } |
1615 | ||
1616 | /* | |
1617 | * TODO: For frame(s) that are in the retry state, we will reuse the | |
1618 | * sequence number(s) without setting the retry bit. The | |
1619 | * alternative is to give up on these and BAR the receiver's window | |
1620 | * forward. | |
1621 | */ | |
1622 | tid->seq_next = tid->seq_start; | |
1623 | tid->baw_tail = tid->baw_head; | |
1624 | } | |
1625 | ||
1626 | /* | |
1627 | * Drain all pending buffers | |
1628 | * NB: must be called with txq lock held | |
1629 | */ | |
1630 | ||
1631 | static void ath_txq_drain_pending_buffers(struct ath_softc *sc, | |
b5aa9bf9 | 1632 | struct ath_txq *txq) |
f078f209 LR |
1633 | { |
1634 | struct ath_atx_ac *ac, *ac_tmp; | |
1635 | struct ath_atx_tid *tid, *tid_tmp; | |
1636 | ||
1637 | list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { | |
1638 | list_del(&ac->list); | |
1639 | ac->sched = false; | |
1640 | list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { | |
1641 | list_del(&tid->list); | |
1642 | tid->sched = false; | |
b5aa9bf9 | 1643 | ath_tid_drain(sc, txq, tid); |
f078f209 LR |
1644 | } |
1645 | } | |
1646 | } | |
1647 | ||
528f0c6b S |
1648 | static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf, |
1649 | struct sk_buff *skb, struct scatterlist *sg, | |
1650 | struct ath_tx_control *txctl) | |
f078f209 | 1651 | { |
528f0c6b S |
1652 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
1653 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | |
f078f209 | 1654 | struct ath_tx_info_priv *tx_info_priv; |
528f0c6b S |
1655 | int hdrlen; |
1656 | __le16 fc; | |
e022edbd | 1657 | |
a8efee4f S |
1658 | tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL); |
1659 | tx_info->rate_driver_data[0] = tx_info_priv; | |
528f0c6b S |
1660 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); |
1661 | fc = hdr->frame_control; | |
f078f209 | 1662 | |
528f0c6b | 1663 | ATH_TXBUF_RESET(bf); |
f078f209 | 1664 | |
528f0c6b | 1665 | /* Frame type */ |
f078f209 | 1666 | |
528f0c6b | 1667 | bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); |
cd3d39a6 S |
1668 | |
1669 | ieee80211_is_data(fc) ? | |
1670 | (bf->bf_state.bf_type |= BUF_DATA) : | |
1671 | (bf->bf_state.bf_type &= ~BUF_DATA); | |
1672 | ieee80211_is_back_req(fc) ? | |
1673 | (bf->bf_state.bf_type |= BUF_BAR) : | |
1674 | (bf->bf_state.bf_type &= ~BUF_BAR); | |
1675 | ieee80211_is_pspoll(fc) ? | |
1676 | (bf->bf_state.bf_type |= BUF_PSPOLL) : | |
1677 | (bf->bf_state.bf_type &= ~BUF_PSPOLL); | |
672840ac | 1678 | (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ? |
cd3d39a6 S |
1679 | (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) : |
1680 | (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE); | |
a8efee4f | 1681 | (sc->hw->conf.ht.enabled && !is_pae(skb) && |
528f0c6b S |
1682 | (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ? |
1683 | (bf->bf_state.bf_type |= BUF_HT) : | |
1684 | (bf->bf_state.bf_type &= ~BUF_HT); | |
1685 | ||
1686 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | |
1687 | ||
1688 | /* Crypto */ | |
1689 | ||
1690 | bf->bf_keytype = get_hw_crypto_keytype(skb); | |
1691 | ||
1692 | if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { | |
1693 | bf->bf_frmlen += tx_info->control.hw_key->icv_len; | |
1694 | bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; | |
1695 | } else { | |
1696 | bf->bf_keyix = ATH9K_TXKEYIX_INVALID; | |
1697 | } | |
1698 | ||
528f0c6b S |
1699 | /* Assign seqno, tidno */ |
1700 | ||
1701 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) | |
1702 | assign_aggr_tid_seqno(skb, bf); | |
1703 | ||
1704 | /* DMA setup */ | |
1705 | ||
f078f209 | 1706 | bf->bf_mpdu = skb; |
528f0c6b S |
1707 | bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data, |
1708 | skb->len, PCI_DMA_TODEVICE); | |
1709 | bf->bf_buf_addr = bf->bf_dmacontext; | |
1710 | } | |
1711 | ||
1712 | /* FIXME: tx power */ | |
1713 | static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |
1714 | struct scatterlist *sg, u32 n_sg, | |
1715 | struct ath_tx_control *txctl) | |
1716 | { | |
1717 | struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; | |
1718 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | |
1719 | struct ath_node *an = NULL; | |
1720 | struct list_head bf_head; | |
1721 | struct ath_desc *ds; | |
1722 | struct ath_atx_tid *tid; | |
1723 | struct ath_hal *ah = sc->sc_ah; | |
1724 | int frm_type; | |
1725 | ||
528f0c6b S |
1726 | frm_type = get_hw_packet_type(skb); |
1727 | ||
1728 | INIT_LIST_HEAD(&bf_head); | |
1729 | list_add_tail(&bf->list, &bf_head); | |
f078f209 LR |
1730 | |
1731 | /* setup descriptor */ | |
528f0c6b | 1732 | |
f078f209 LR |
1733 | ds = bf->bf_desc; |
1734 | ds->ds_link = 0; | |
1735 | ds->ds_data = bf->bf_buf_addr; | |
1736 | ||
528f0c6b | 1737 | /* Formulate first tx descriptor with tx controls */ |
f078f209 | 1738 | |
528f0c6b S |
1739 | ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, |
1740 | bf->bf_keyix, bf->bf_keytype, bf->bf_flags); | |
1741 | ||
1742 | ath9k_hw_filltxdesc(ah, ds, | |
1743 | sg_dma_len(sg), /* segment length */ | |
1744 | true, /* first segment */ | |
1745 | (n_sg == 1) ? true : false, /* last segment */ | |
1746 | ds); /* first descriptor */ | |
f078f209 LR |
1747 | |
1748 | bf->bf_lastfrm = bf; | |
f078f209 | 1749 | |
528f0c6b | 1750 | spin_lock_bh(&txctl->txq->axq_lock); |
f078f209 | 1751 | |
f1617967 JL |
1752 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && |
1753 | tx_info->control.sta) { | |
1754 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | |
1755 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | |
1756 | ||
528f0c6b | 1757 | if (ath_aggr_query(sc, an, bf->bf_tidno)) { |
f078f209 LR |
1758 | /* |
1759 | * Try aggregation if it's a unicast data frame | |
1760 | * and the destination is HT capable. | |
1761 | */ | |
528f0c6b | 1762 | ath_tx_send_ampdu(sc, tid, &bf_head, txctl); |
f078f209 LR |
1763 | } else { |
1764 | /* | |
528f0c6b S |
1765 | * Send this frame as regular when ADDBA |
1766 | * exchange is neither complete nor pending. | |
f078f209 | 1767 | */ |
528f0c6b S |
1768 | ath_tx_send_normal(sc, txctl->txq, |
1769 | tid, &bf_head); | |
f078f209 LR |
1770 | } |
1771 | } else { | |
1772 | bf->bf_lastbf = bf; | |
1773 | bf->bf_nframes = 1; | |
f078f209 | 1774 | |
528f0c6b S |
1775 | ath_buf_set_rate(sc, bf); |
1776 | ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); | |
f078f209 | 1777 | } |
528f0c6b S |
1778 | |
1779 | spin_unlock_bh(&txctl->txq->axq_lock); | |
f078f209 LR |
1780 | } |
1781 | ||
528f0c6b S |
1782 | int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb, |
1783 | struct ath_tx_control *txctl) | |
f078f209 | 1784 | { |
528f0c6b | 1785 | struct ath_buf *bf; |
f078f209 LR |
1786 | struct scatterlist sg; |
1787 | ||
528f0c6b S |
1788 | /* Check if a tx buffer is available */ |
1789 | ||
1790 | bf = ath_tx_get_buffer(sc); | |
1791 | if (!bf) { | |
1792 | DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n", | |
1793 | __func__); | |
1794 | return -1; | |
1795 | } | |
1796 | ||
1797 | ath_tx_setup_buffer(sc, bf, skb, &sg, txctl); | |
1798 | ||
1799 | /* Setup S/G */ | |
f078f209 | 1800 | |
f078f209 | 1801 | memset(&sg, 0, sizeof(struct scatterlist)); |
528f0c6b | 1802 | sg_dma_address(&sg) = bf->bf_dmacontext; |
f078f209 LR |
1803 | sg_dma_len(&sg) = skb->len; |
1804 | ||
528f0c6b | 1805 | ath_tx_start_dma(sc, bf, &sg, 1, txctl); |
f078f209 | 1806 | |
528f0c6b | 1807 | return 0; |
f078f209 LR |
1808 | } |
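/*
 * Usage sketch (illustrative, assuming a caller in the mac80211 tx
 * path): a transmit queue is picked first via ath_test_get_txq() and
 * the frame is then handed to ath_tx_start(). This is a minimal
 * sketch, not the driver's actual tx entry point.
 */
#if 0
static int ath_tx_example(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	txctl.txq = ath_test_get_txq(sc, skb);
	if (!txctl.txq)
		return -1;	/* queue full; mac80211 queue was stopped */

	return ath_tx_start(sc, skb, &txctl);
}
#endif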
1809 | ||
1810 | /* Initialize TX queue and h/w */ | |
1811 | ||
1812 | int ath_tx_init(struct ath_softc *sc, int nbufs) | |
1813 | { | |
1814 | int error = 0; | |
1815 | ||
1816 | do { | |
1817 | spin_lock_init(&sc->sc_txbuflock); | |
1818 | ||
1819 | /* Setup tx descriptors */ | |
1820 | error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, | |
556bb8f1 | 1821 | "tx", nbufs, 1); |
f078f209 LR |
1822 | if (error != 0) { |
1823 | DPRINTF(sc, ATH_DBG_FATAL, | |
1824 | "%s: failed to allocate tx descriptors: %d\n", | |
1825 | __func__, error); | |
1826 | break; | |
1827 | } | |
1828 | ||
1829 | /* XXX allocate beacon state together with vap */ | |
1830 | error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, | |
1831 | "beacon", ATH_BCBUF, 1); | |
1832 | if (error != 0) { | |
1833 | DPRINTF(sc, ATH_DBG_FATAL, | |
1834 | "%s: failed to allocate " | |
1835 | "beacon descripotrs: %d\n", | |
1836 | __func__, error); | |
1837 | break; | |
1838 | } | |
1839 | ||
1840 | } while (0); | |
1841 | ||
1842 | if (error != 0) | |
1843 | ath_tx_cleanup(sc); | |
1844 | ||
1845 | return error; | |
1846 | } | |
1847 | ||
1848 | /* Reclaim all tx queue resources */ | |
1849 | ||
1850 | int ath_tx_cleanup(struct ath_softc *sc) | |
1851 | { | |
1852 | /* cleanup beacon descriptors */ | |
1853 | if (sc->sc_bdma.dd_desc_len != 0) | |
1854 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); | |
1855 | ||
1856 | /* cleanup tx descriptors */ | |
1857 | if (sc->sc_txdma.dd_desc_len != 0) | |
1858 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); | |
1859 | ||
1860 | return 0; | |
1861 | } | |
1862 | ||
1863 | /* Setup a h/w transmit queue */ | |
1864 | ||
1865 | struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | |
1866 | { | |
1867 | struct ath_hal *ah = sc->sc_ah; | |
ea9880fb | 1868 | struct ath9k_tx_queue_info qi; |
f078f209 LR |
1869 | int qnum; |
1870 | ||
0345f37b | 1871 | memset(&qi, 0, sizeof(qi)); |
f078f209 LR |
1872 | qi.tqi_subtype = subtype; |
1873 | qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; | |
1874 | qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; | |
1875 | qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; | |
ea9880fb | 1876 | qi.tqi_physCompBuf = 0; |
f078f209 LR |
1877 | |
1878 | /* | |
1879 | * Enable interrupts only for EOL and DESC conditions. | |
1880 | * We mark tx descriptors to receive a DESC interrupt | |
1881 | * when a tx queue gets deep; otherwise we wait for the | |
1882 | * EOL to reap descriptors. Note that this is done to | |
1883 | * reduce interrupt load, and it only defers reaping | |
1884 | * descriptors, never transmitting frames. Aside from | |
1885 | * reducing interrupts this also permits more concurrency. | |
1886 | * The only potential downside is if the tx queue backs | |
1887 | * up, in which case the top half of the kernel may back up | |
1888 | * due to a lack of tx descriptors. | |
1889 | * | |
1890 | * The UAPSD queue is an exception, since we take a desc- | |
1891 | * based intr on the EOSP frames. | |
1892 | */ | |
1893 | if (qtype == ATH9K_TX_QUEUE_UAPSD) | |
1894 | qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; | |
1895 | else | |
1896 | qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | | |
1897 | TXQ_FLAG_TXDESCINT_ENABLE; | |
1898 | qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); | |
1899 | if (qnum == -1) { | |
1900 | /* | |
1901 | * NB: don't print a message, this happens | |
1902 | * normally on parts with too few tx queues | |
1903 | */ | |
1904 | return NULL; | |
1905 | } | |
1906 | if (qnum >= ARRAY_SIZE(sc->sc_txq)) { | |
1907 | DPRINTF(sc, ATH_DBG_FATAL, | |
1908 | "%s: hal qnum %u out of range, max %u!\n", | |
1909 | __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)); | |
1910 | ath9k_hw_releasetxqueue(ah, qnum); | |
1911 | return NULL; | |
1912 | } | |
1913 | if (!ATH_TXQ_SETUP(sc, qnum)) { | |
1914 | struct ath_txq *txq = &sc->sc_txq[qnum]; | |
1915 | ||
1916 | txq->axq_qnum = qnum; | |
1917 | txq->axq_link = NULL; | |
1918 | INIT_LIST_HEAD(&txq->axq_q); | |
1919 | INIT_LIST_HEAD(&txq->axq_acq); | |
1920 | spin_lock_init(&txq->axq_lock); | |
1921 | txq->axq_depth = 0; | |
1922 | txq->axq_aggr_depth = 0; | |
1923 | txq->axq_totalqueued = 0; | |
f078f209 LR |
1924 | txq->axq_linkbuf = NULL; |
1925 | sc->sc_txqsetup |= 1<<qnum; | |
1926 | } | |
1927 | return &sc->sc_txq[qnum]; | |
1928 | } | |
1929 | ||
1930 | /* Reclaim resources for a setup queue */ | |
1931 | ||
1932 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) | |
1933 | { | |
1934 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); | |
1935 | sc->sc_txqsetup &= ~(1<<txq->axq_qnum); | |
1936 | } | |
1937 | ||
1938 | /* | |
1939 | * Setup a hardware data transmit queue for the specified | |
1940 | * access category. The hal may not support all requested | |
1941 | * queues, in which case it will return a reference to a | |
1942 | * previously setup queue. We record the mapping from ACs | |
1943 | * to h/w queues for use by ath_tx_start and also track | |
1944 | * the set of h/w queues being used to optimize work in the | |
1945 | * transmit interrupt handler and related routines. | |
1946 | */ | |
1947 | ||
1948 | int ath_tx_setup(struct ath_softc *sc, int haltype) | |
1949 | { | |
1950 | struct ath_txq *txq; | |
1951 | ||
1952 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | |
1953 | DPRINTF(sc, ATH_DBG_FATAL, | |
1954 | "%s: HAL AC %u out of range, max %zu!\n", | |
1955 | __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q)); | |
1956 | return 0; | |
1957 | } | |
1958 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); | |
1959 | if (txq != NULL) { | |
1960 | sc->sc_haltype2q[haltype] = txq->axq_qnum; | |
1961 | return 1; | |
1962 | } else | |
1963 | return 0; | |
1964 | } | |
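/*
 * Illustrative sketch (assumed attach-time caller): ath_tx_setup() is
 * expected to be invoked once per WME access category so that
 * sc_haltype2q holds a valid mapping before any data frame is queued.
 */
#if 0
static int ath_tx_setup_all_acs(struct ath_softc *sc)
{
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK) ||
	    !ath_tx_setup(sc, ATH9K_WME_AC_BE) ||
	    !ath_tx_setup(sc, ATH9K_WME_AC_VI) ||
	    !ath_tx_setup(sc, ATH9K_WME_AC_VO))
		return -EIO;	/* h/w could not provide enough data queues */
	return 0;
}
#endif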
1965 | ||
1966 | int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) | |
1967 | { | |
1968 | int qnum; | |
1969 | ||
1970 | switch (qtype) { | |
1971 | case ATH9K_TX_QUEUE_DATA: | |
1972 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | |
1973 | DPRINTF(sc, ATH_DBG_FATAL, | |
1974 | "%s: HAL AC %u out of range, max %zu!\n", | |
1975 | __func__, | |
1976 | haltype, ARRAY_SIZE(sc->sc_haltype2q)); | |
1977 | return -1; | |
1978 | } | |
1979 | qnum = sc->sc_haltype2q[haltype]; | |
1980 | break; | |
1981 | case ATH9K_TX_QUEUE_BEACON: | |
1982 | qnum = sc->sc_bhalq; | |
1983 | break; | |
1984 | case ATH9K_TX_QUEUE_CAB: | |
1985 | qnum = sc->sc_cabq->axq_qnum; | |
1986 | break; | |
1987 | default: | |
1988 | qnum = -1; | |
1989 | } | |
1990 | return qnum; | |
1991 | } | |
1992 | ||
528f0c6b S |
1993 | /* Get a transmit queue, if available */ |
1994 | ||
1995 | struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) | |
1996 | { | |
1997 | struct ath_txq *txq = NULL; | |
1998 | int qnum; | |
1999 | ||
2000 | qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); | |
2001 | txq = &sc->sc_txq[qnum]; | |
2002 | ||
2003 | spin_lock_bh(&txq->axq_lock); | |
2004 | ||
2005 | /* Try to avoid running out of descriptors */ | |
2006 | if (txq->axq_depth >= (ATH_TXBUF - 20)) { | |
2007 | DPRINTF(sc, ATH_DBG_FATAL, | |
2008 | "%s: TX queue: %d is full, depth: %d\n", | |
2009 | __func__, qnum, txq->axq_depth); | |
2010 | ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb)); | |
2011 | txq->stopped = 1; | |
2012 | spin_unlock_bh(&txq->axq_lock); | |
2013 | return NULL; | |
2014 | } | |
2015 | ||
2016 | spin_unlock_bh(&txq->axq_lock); | |
2017 | ||
2018 | return txq; | |
2019 | } | |
2020 | ||
f078f209 LR |
2021 | /* Update parameters for a transmit queue */ |
2022 | ||
ea9880fb S |
2023 | int ath_txq_update(struct ath_softc *sc, int qnum, |
2024 | struct ath9k_tx_queue_info *qinfo) | |
f078f209 LR |
2025 | { |
2026 | struct ath_hal *ah = sc->sc_ah; | |
2027 | int error = 0; | |
ea9880fb | 2028 | struct ath9k_tx_queue_info qi; |
f078f209 LR |
2029 | |
2030 | if (qnum == sc->sc_bhalq) { | |
2031 | /* | |
2032 | * XXX: for beacon queue, we just save the parameter. | |
2033 | * It will be picked up by ath_beaconq_config when | |
2034 | * it's necessary. | |
2035 | */ | |
ea9880fb | 2036 | sc->sc_beacon_qi = *qinfo; |
f078f209 LR |
2037 | return 0; |
2038 | } | |
2039 | ||
2040 | ASSERT(sc->sc_txq[qnum].axq_qnum == qnum); | |
2041 | ||
ea9880fb S |
2042 | ath9k_hw_get_txq_props(ah, qnum, &qi); |
2043 | qi.tqi_aifs = qinfo->tqi_aifs; | |
2044 | qi.tqi_cwmin = qinfo->tqi_cwmin; | |
2045 | qi.tqi_cwmax = qinfo->tqi_cwmax; | |
2046 | qi.tqi_burstTime = qinfo->tqi_burstTime; | |
2047 | qi.tqi_readyTime = qinfo->tqi_readyTime; | |
f078f209 | 2048 | |
ea9880fb | 2049 | if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { |
f078f209 LR |
2050 | DPRINTF(sc, ATH_DBG_FATAL, |
2051 | "%s: unable to update hardware queue %u!\n", | |
2052 | __func__, qnum); | |
2053 | error = -EIO; | |
2054 | } else { | |
2055 | ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ | |
2056 | } | |
2057 | ||
2058 | return error; | |
2059 | } | |
2060 | ||
2061 | int ath_cabq_update(struct ath_softc *sc) | |
2062 | { | |
ea9880fb | 2063 | struct ath9k_tx_queue_info qi; |
f078f209 LR |
2064 | int qnum = sc->sc_cabq->axq_qnum; |
2065 | struct ath_beacon_config conf; | |
2066 | ||
ea9880fb | 2067 | ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); |
f078f209 LR |
2068 | /* |
2069 | * Ensure the readytime % is within the bounds. | |
2070 | */ | |
2071 | if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) | |
2072 | sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; | |
2073 | else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) | |
2074 | sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; | |
2075 | ||
2076 | ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf); | |
2077 | qi.tqi_readyTime = | |
2078 | (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100; | |
2079 | ath_txq_update(sc, qnum, &qi); | |
2080 | ||
2081 | return 0; | |
2082 | } | |
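/*
 * Worked example (values assumed for illustration): with a beacon
 * interval of 100 TU and cabqReadytime clamped to 10%, the ready time
 * programmed above is (100 * 10) / 100 = 10 TU per beacon period.
 */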
2083 | ||
f078f209 LR |
2084 | /* Deferred processing of transmit interrupt */ |
2085 | ||
2086 | void ath_tx_tasklet(struct ath_softc *sc) | |
2087 | { | |
1fe1132b | 2088 | int i; |
f078f209 LR |
2089 | u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); |
2090 | ||
2091 | ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); | |
2092 | ||
2093 | /* | |
2094 | * Process each active queue. | |
2095 | */ | |
2096 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | |
2097 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) | |
1fe1132b | 2098 | ath_tx_processq(sc, &sc->sc_txq[i]); |
f078f209 | 2099 | } |
f078f209 LR |
2100 | } |
2101 | ||
2102 | void ath_tx_draintxq(struct ath_softc *sc, | |
2103 | struct ath_txq *txq, bool retry_tx) | |
2104 | { | |
2105 | struct ath_buf *bf, *lastbf; | |
2106 | struct list_head bf_head; | |
2107 | ||
2108 | INIT_LIST_HEAD(&bf_head); | |
2109 | ||
2110 | /* | |
2111 | * NB: this assumes output has been stopped and | |
2112 | * we do not need to block ath_tx_tasklet | |
2113 | */ | |
2114 | for (;;) { | |
2115 | spin_lock_bh(&txq->axq_lock); | |
2116 | ||
2117 | if (list_empty(&txq->axq_q)) { | |
2118 | txq->axq_link = NULL; | |
2119 | txq->axq_linkbuf = NULL; | |
2120 | spin_unlock_bh(&txq->axq_lock); | |
2121 | break; | |
2122 | } | |
2123 | ||
2124 | bf = list_first_entry(&txq->axq_q, struct ath_buf, list); | |
2125 | ||
2126 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | |
2127 | list_del(&bf->list); | |
2128 | spin_unlock_bh(&txq->axq_lock); | |
2129 | ||
2130 | spin_lock_bh(&sc->sc_txbuflock); | |
2131 | list_add_tail(&bf->list, &sc->sc_txbuf); | |
2132 | spin_unlock_bh(&sc->sc_txbuflock); | |
2133 | continue; | |
2134 | } | |
2135 | ||
2136 | lastbf = bf->bf_lastbf; | |
2137 | if (!retry_tx) | |
2138 | lastbf->bf_desc->ds_txstat.ts_flags = | |
2139 | ATH9K_TX_SW_ABORTED; | |
2140 | ||
2141 | /* remove ath_buf's of the same mpdu from txq */ | |
2142 | list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); | |
2143 | txq->axq_depth--; | |
2144 | ||
2145 | spin_unlock_bh(&txq->axq_lock); | |
2146 | ||
cd3d39a6 | 2147 | if (bf_isampdu(bf)) |
f078f209 LR |
2148 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); |
2149 | else | |
2150 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | |
2151 | } | |
2152 | ||
2153 | /* flush any pending frames if aggregation is enabled */ | |
672840ac | 2154 | if (sc->sc_flags & SC_OP_TXAGGR) { |
f078f209 LR |
2155 | if (!retry_tx) { |
2156 | spin_lock_bh(&txq->axq_lock); | |
b5aa9bf9 | 2157 | ath_txq_drain_pending_buffers(sc, txq); |
f078f209 LR |
2158 | spin_unlock_bh(&txq->axq_lock); |
2159 | } | |
2160 | } | |
2161 | } | |
2162 | ||
2163 | /* Drain the transmit queues and reclaim resources */ | |
2164 | ||
2165 | void ath_draintxq(struct ath_softc *sc, bool retry_tx) | |
2166 | { | |
2167 | /* stop beacon queue. The beacon will be freed when | |
2168 | * we go to INIT state */ | |
672840ac | 2169 | if (!(sc->sc_flags & SC_OP_INVALID)) { |
f078f209 LR |
2170 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); |
2171 | DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, | |
2172 | ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); | |
2173 | } | |
2174 | ||
2175 | ath_drain_txdataq(sc, retry_tx); | |
2176 | } | |
2177 | ||
2178 | u32 ath_txq_depth(struct ath_softc *sc, int qnum) | |
2179 | { | |
2180 | return sc->sc_txq[qnum].axq_depth; | |
2181 | } | |
2182 | ||
2183 | u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) | |
2184 | { | |
2185 | return sc->sc_txq[qnum].axq_aggr_depth; | |
2186 | } | |
2187 | ||
ccc75c52 | 2188 | bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) |
f078f209 LR |
2189 | { |
2190 | struct ath_atx_tid *txtid; | |
f078f209 | 2191 | |
672840ac | 2192 | if (!(sc->sc_flags & SC_OP_TXAGGR)) |
ccc75c52 | 2193 | return false; |
f078f209 | 2194 | |
f078f209 LR |
2195 | txtid = ATH_AN_2_TID(an, tidno); |
2196 | ||
a37c2c79 S |
2197 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { |
2198 | if (!(txtid->state & AGGR_ADDBA_PROGRESS) && | |
f078f209 LR |
2199 | (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { |
2200 | txtid->addba_exchangeattempts++; | |
ccc75c52 | 2201 | return true; |
f078f209 LR |
2202 | } |
2203 | } | |
2204 | ||
ccc75c52 | 2205 | return false; |
f078f209 LR |
2206 | } |
2207 | ||
2208 | /* Start TX aggregation */ | |
2209 | ||
b5aa9bf9 S |
2210 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, |
2211 | u16 tid, u16 *ssn) | |
f078f209 LR |
2212 | { |
2213 | struct ath_atx_tid *txtid; | |
2214 | struct ath_node *an; | |
2215 | ||
b5aa9bf9 | 2216 | an = (struct ath_node *)sta->drv_priv; |
f078f209 | 2217 | |
672840ac | 2218 | if (sc->sc_flags & SC_OP_TXAGGR) { |
f078f209 | 2219 | txtid = ATH_AN_2_TID(an, tid); |
a37c2c79 | 2220 | txtid->state |= AGGR_ADDBA_PROGRESS; |
f078f209 LR |
2221 | ath_tx_pause_tid(sc, txtid); |
2222 | } | |
2223 | ||
2224 | return 0; | |
2225 | } | |
2226 | ||
2227 | /* Stop tx aggregation */ | |
2228 | ||
b5aa9bf9 | 2229 | int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) |
f078f209 | 2230 | { |
b5aa9bf9 | 2231 | struct ath_node *an = (struct ath_node *)sta->drv_priv; |
f078f209 LR |
2232 | |
2233 | ath_tx_aggr_teardown(sc, an, tid); | |
2234 | return 0; | |
2235 | } | |
2236 | ||
8469cdef S |
2237 | /* Resume tx aggregation */ |
2238 | ||
2239 | void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) | |
2240 | { | |
2241 | struct ath_atx_tid *txtid; | |
2242 | struct ath_node *an; | |
2243 | ||
2244 | an = (struct ath_node *)sta->drv_priv; | |
2245 | ||
2246 | if (sc->sc_flags & SC_OP_TXAGGR) { | |
2247 | txtid = ATH_AN_2_TID(an, tid); | |
2248 | txtid->baw_size = | |
2249 | IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; | |
2250 | txtid->state |= AGGR_ADDBA_COMPLETE; | |
2251 | txtid->state &= ~AGGR_ADDBA_PROGRESS; | |
2252 | ath_tx_resume_tid(sc, txtid); | |
2253 | } | |
2254 | } | |
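/*
 * Worked example (illustrative): IEEE80211_MIN_AMPDU_BUF is 8, so a
 * peer advertising ampdu_factor 3 resumes with a block-ack window of
 * 8 << 3 = 64 frames, while ampdu_factor 0 yields the minimum of 8.
 */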
2255 | ||
f078f209 LR |
2256 | /* |
2257 | * Performs transmit side cleanup when TID changes from aggregated to | |
2258 | * unaggregated. | |
2259 | * - Pause the TID and mark cleanup in progress | |
2260 | * - Discard all retry frames from the s/w queue. | |
2261 | */ | |
2262 | ||
b5aa9bf9 | 2263 | void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid) |
f078f209 LR |
2264 | { |
2265 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); | |
2266 | struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum]; | |
2267 | struct ath_buf *bf; | |
2268 | struct list_head bf_head; | |
2269 | INIT_LIST_HEAD(&bf_head); | |
2270 | ||
2271 | DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__); | |
2272 | ||
a37c2c79 | 2273 | if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */ |
f078f209 LR |
2274 | return; |
2275 | ||
a37c2c79 | 2276 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { |
f078f209 LR |
2277 | txtid->addba_exchangeattempts = 0; |
2278 | return; | |
2279 | } | |
2280 | ||
2281 | /* TID must be paused first */ | |
2282 | ath_tx_pause_tid(sc, txtid); | |
2283 | ||
2284 | /* drop all software retried frames and mark this TID */ | |
2285 | spin_lock_bh(&txq->axq_lock); | |
2286 | while (!list_empty(&txtid->buf_q)) { | |
2287 | bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); | |
cd3d39a6 | 2288 | if (!bf_isretried(bf)) { |
f078f209 LR |
2289 | /* |
2290 | * NB: this is based on the assumption that a | |
2291 | * software retried frame will always stay | |
2292 | * at the head of the software queue. | |
2293 | */ | |
2294 | break; | |
2295 | } | |
2296 | list_cut_position(&bf_head, | |
2297 | &txtid->buf_q, &bf->bf_lastfrm->list); | |
2298 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); | |
2299 | ||
2300 | /* complete this sub-frame */ | |
2301 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | |
2302 | } | |
2303 | ||
2304 | if (txtid->baw_head != txtid->baw_tail) { | |
2305 | spin_unlock_bh(&txq->axq_lock); | |
a37c2c79 | 2306 | txtid->state |= AGGR_CLEANUP; |
f078f209 | 2307 | } else { |
a37c2c79 | 2308 | txtid->state &= ~AGGR_ADDBA_COMPLETE; |
f078f209 LR |
2309 | txtid->addba_exchangeattempts = 0; |
2310 | spin_unlock_bh(&txq->axq_lock); | |
2311 | ath_tx_flush_tid(sc, txtid); | |
2312 | } | |
2313 | } | |
2314 | ||
2315 | /* | |
2316 | * Tx scheduling logic | |
2317 | * NB: must be called with txq lock held | |
2318 | */ | |
2319 | ||
2320 | void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) | |
2321 | { | |
2322 | struct ath_atx_ac *ac; | |
2323 | struct ath_atx_tid *tid; | |
2324 | ||
2325 | /* nothing to schedule */ | |
2326 | if (list_empty(&txq->axq_acq)) | |
2327 | return; | |
2328 | /* | |
2329 | * get the first node/ac pair on the queue | |
2330 | */ | |
2331 | ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); | |
2332 | list_del(&ac->list); | |
2333 | ac->sched = false; | |
2334 | ||
2335 | /* | |
2336 | * process a single tid per destination | |
2337 | */ | |
2338 | do { | |
2339 | /* nothing to schedule */ | |
2340 | if (list_empty(&ac->tid_q)) | |
2341 | return; | |
2342 | ||
2343 | tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); | |
2344 | list_del(&tid->list); | |
2345 | tid->sched = false; | |
2346 | ||
2347 | if (tid->paused) /* check next tid to keep h/w busy */ | |
2348 | continue; | |
2349 | ||
43453b33 | 2350 | if ((txq->axq_depth % 2) == 0) |
f078f209 | 2351 | ath_tx_sched_aggr(sc, txq, tid); |
f078f209 LR |
2352 | |
2353 | /* | |
2354 | * add tid to round-robin queue if more frames | |
2355 | * are pending for the tid | |
2356 | */ | |
2357 | if (!list_empty(&tid->buf_q)) | |
2358 | ath_tx_queue_tid(txq, tid); | |
2359 | ||
2360 | /* only schedule one TID at a time */ | |
2361 | break; | |
2362 | } while (!list_empty(&ac->tid_q)); | |
2363 | ||
2364 | /* | |
2365 | * schedule AC if more TIDs need processing | |
2366 | */ | |
2367 | if (!list_empty(&ac->tid_q)) { | |
2368 | /* | |
2369 | * add dest ac to txq if not already added | |
2370 | */ | |
2371 | if (!ac->sched) { | |
2372 | ac->sched = true; | |
2373 | list_add_tail(&ac->list, &txq->axq_acq); | |
2374 | } | |
2375 | } | |
2376 | } | |
2377 | ||
2378 | /* Initialize per-node transmit state */ | |
2379 | ||
2380 | void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | |
2381 | { | |
c5170163 S |
2382 | struct ath_atx_tid *tid; |
2383 | struct ath_atx_ac *ac; | |
2384 | int tidno, acno; | |
f078f209 | 2385 | |
c5170163 S |
2386 | /* |
2387 | * Init per tid tx state | |
2388 | */ | |
2389 | for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; | |
2390 | tidno < WME_NUM_TID; | |
2391 | tidno++, tid++) { | |
2392 | tid->an = an; | |
2393 | tid->tidno = tidno; | |
2394 | tid->seq_start = tid->seq_next = 0; | |
2395 | tid->baw_size = WME_MAX_BA; | |
2396 | tid->baw_head = tid->baw_tail = 0; | |
2397 | tid->sched = false; | |
2398 | tid->paused = false; | |
a37c2c79 | 2399 | tid->state &= ~AGGR_CLEANUP; |
c5170163 S |
2400 | INIT_LIST_HEAD(&tid->buf_q); |
2401 | ||
2402 | acno = TID_TO_WME_AC(tidno); | |
2403 | tid->ac = &an->an_aggr.tx.ac[acno]; | |
2404 | ||
2405 | /* ADDBA state */ | |
a37c2c79 S |
2406 | tid->state &= ~AGGR_ADDBA_COMPLETE; |
2407 | tid->state &= ~AGGR_ADDBA_PROGRESS; | |
2408 | tid->addba_exchangeattempts = 0; | |
c5170163 | 2409 | } |
f078f209 | 2410 | |
c5170163 S |
2411 | /* |
2412 | * Init per ac tx state | |
2413 | */ | |
2414 | for (acno = 0, ac = &an->an_aggr.tx.ac[acno]; | |
2415 | acno < WME_NUM_AC; acno++, ac++) { | |
2416 | ac->sched = false; | |
2417 | INIT_LIST_HEAD(&ac->tid_q); | |
2418 | ||
2419 | switch (acno) { | |
2420 | case WME_AC_BE: | |
2421 | ac->qnum = ath_tx_get_qnum(sc, | |
2422 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); | |
2423 | break; | |
2424 | case WME_AC_BK: | |
2425 | ac->qnum = ath_tx_get_qnum(sc, | |
2426 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); | |
2427 | break; | |
2428 | case WME_AC_VI: | |
2429 | ac->qnum = ath_tx_get_qnum(sc, | |
2430 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); | |
2431 | break; | |
2432 | case WME_AC_VO: | |
2433 | ac->qnum = ath_tx_get_qnum(sc, | |
2434 | ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); | |
2435 | break; | |
f078f209 LR |
2436 | } |
2437 | } | |
2438 | } | |
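/*
 * Note (per the standard WMM UP-to-AC mapping assumed by
 * TID_TO_WME_AC): TIDs 1 and 2 map to WME_AC_BK, 0 and 3 to
 * WME_AC_BE, 4 and 5 to WME_AC_VI, and 6 and 7 to WME_AC_VO.
 */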
2439 | ||
2440 | /* Clean up the pending buffers for the node. */ | |
2441 | ||
b5aa9bf9 | 2442 | void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) |
f078f209 LR |
2443 | { |
2444 | int i; | |
2445 | struct ath_atx_ac *ac, *ac_tmp; | |
2446 | struct ath_atx_tid *tid, *tid_tmp; | |
2447 | struct ath_txq *txq; | |
2448 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | |
2449 | if (ATH_TXQ_SETUP(sc, i)) { | |
2450 | txq = &sc->sc_txq[i]; | |
2451 | ||
b5aa9bf9 | 2452 | spin_lock(&txq->axq_lock); |
f078f209 LR |
2453 | |
2454 | list_for_each_entry_safe(ac, | |
2455 | ac_tmp, &txq->axq_acq, list) { | |
2456 | tid = list_first_entry(&ac->tid_q, | |
2457 | struct ath_atx_tid, list); | |
2458 | if (tid && tid->an != an) | |
2459 | continue; | |
2460 | list_del(&ac->list); | |
2461 | ac->sched = false; | |
2462 | ||
2463 | list_for_each_entry_safe(tid, | |
2464 | tid_tmp, &ac->tid_q, list) { | |
2465 | list_del(&tid->list); | |
2466 | tid->sched = false; | |
b5aa9bf9 | 2467 | ath_tid_drain(sc, txq, tid); |
a37c2c79 | 2468 | tid->state &= ~AGGR_ADDBA_COMPLETE; |
f078f209 | 2469 | tid->addba_exchangeattempts = 0; |
a37c2c79 | 2470 | tid->state &= ~AGGR_CLEANUP; |
f078f209 LR |
2471 | } |
2472 | } | |
2473 | ||
b5aa9bf9 | 2474 | spin_unlock(&txq->axq_lock); |
f078f209 LR |
2475 | } |
2476 | } | |
2477 | } | |
2478 | ||
e022edbd JM |
2479 | void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb) |
2480 | { | |
2481 | int hdrlen, padsize; | |
2482 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
2483 | struct ath_tx_control txctl; | |
2484 | ||
528f0c6b S |
2485 | memset(&txctl, 0, sizeof(struct ath_tx_control)); |
2486 | ||
e022edbd JM |
2487 | /* |
2488 | * As a temporary workaround, assign seq# here; this will likely need | |
2489 | * to be cleaned up to work better with Beacon transmission and virtual | |
2490 | * BSSes. | |
2491 | */ | |
2492 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | |
2493 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | |
2494 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | |
2495 | sc->seq_no += 0x10; | |
2496 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | |
2497 | hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); | |
2498 | } | |
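/*
 * Note: in the 802.11 Sequence Control field the fragment number
 * occupies bits 0-3 and the sequence number bits 4-15, so adding 0x10
 * above advances the sequence number by one while the
 * IEEE80211_SCTL_FRAG mask preserves the fragment bits.
 */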
2499 | ||
2500 | /* Add the padding after the header if this is not already done */ | |
2501 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | |
2502 | if (hdrlen & 3) { | |
2503 | padsize = hdrlen % 4; | |
2504 | if (skb_headroom(skb) < padsize) { | |
2505 | DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding " | |
2506 | "failed\n", __func__); | |
2507 | dev_kfree_skb_any(skb); | |
2508 | return; | |
2509 | } | |
2510 | skb_push(skb, padsize); | |
2511 | memmove(skb->data, skb->data + padsize, hdrlen); | |
2512 | } | |
2513 | ||
528f0c6b S |
2514 | txctl.txq = sc->sc_cabq; |
2515 | ||
e022edbd JM |
2516 | DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n", |
2517 | __func__, | |
2518 | skb); | |
2519 | ||
528f0c6b S |
2520 | if (ath_tx_start(sc, skb, &txctl) != 0) { |
2521 | DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__); | |
2522 | goto exit; | |
e022edbd | 2523 | } |
e022edbd | 2524 | |
528f0c6b S |
2525 | return; |
2526 | exit: | |
2527 | dev_kfree_skb_any(skb); | |
2528 | } |