/******************************************************************************
 *
 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>

#include "iwl-agn-hw.h"
#include "iwl-trans.h"
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};
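/*
 * RTS/CTS protection for a frame: requested when mac80211 asks for it
 * explicitly through the rate-control flags, and also for any A-MPDU so
 * the whole aggregate is protected.
 */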
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 cfg(priv)->bt_params &&
		 cfg(priv)->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
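/*
 * Fill the retry limits and the rate for this frame.  Data frames are
 * marked TX_CMD_FLG_STA_RATE_MSK so the uCode station table (rate
 * scaling) picks rate and antenna; everything else gets an explicit
 * legacy rate derived from the mac80211 rate index, with the CCK flag
 * and antenna selection filled in here.
 */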
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(hw_params(priv).valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						      hw_params(priv).valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
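/*
 * Program the per-frame security context: CCMP and WEP key material is
 * copied into the TX command itself, while for TKIP only the phase-2
 * (per-packet) key is derived here and handed to the device.
 */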
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}
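/*
 * Main entry point for a frame handed down by mac80211: pick the station
 * and TX queue, build the TX command (basic flags, crypto, rate), assign
 * the QoS sequence number under priv->sta_lock and hand the frame to the
 * transport layer.
 */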
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * Send this frame after DTIM -- there's a special queue
		 * reserved for this for contexts that support AP mode.
		 */
		txq_id = ctx->mcast_queue;

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	if (iwl_trans_tx(trans(priv), skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
	    !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}
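/*
 * Aggregation queues come from a small pool above
 * IWLAGN_FIRST_AMPDU_QUEUE; the agg_q_alloc bitmap tracks which ones are
 * in use and queue_to_ac remembers the AC they serve.
 */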
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int ac)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < cfg(priv)->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_ac[q] = ac;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_ac[q] = IWL_INVALID_AC;
}
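/*
 * Tearing down an aggregation session: if frames for this RA/TID are
 * still queued in the hardware, only mark the state as
 * IWL_EMPTYING_HW_QUEUE_DELBA and finish the teardown later, once
 * iwlagn_check_ratid_empty() sees the SSN catch up with next_reclaimed.
 */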
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 priv->tid_data[sta_id][tid].agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		priv->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		iwl_trans_tx_agg_disable(trans(priv), txq_id);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
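/*
 * ADDBA request from mac80211: reserve an aggregation queue and, if all
 * frames sent before the request have already been reclaimed, switch to
 * IWL_AGG_ON immediately; otherwise wait in IWL_EMPTYING_HW_QUEUE_ADDBA
 * until the pending frames drain.
 */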
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, tid_to_ac[tid]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
			"No free aggregation queue for %pM/%d\n",
			sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_bh(&priv->sta_lock);

	return ret;
}
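/*
 * The session is operational: program the transport/scheduler for the
 * aggregation queue and cap the aggregate size.  The peer's buffer size
 * is clamped to LINK_QUAL_AGG_FRAME_LIMIT_DEF because the uCode keeps a
 * single global limit per station.
 */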
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_tx_agg_setup(trans(priv), q, fifo,
			       sta_priv->sta_id, tid,
			       buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (hw_params(priv).use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the prefer protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
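/*
 * Called with priv->sta_lock held whenever frames are reclaimed: if a
 * RA/TID that is waiting in an EMPTYING state has fully drained
 * (agg.ssn == next_reclaimed), complete the pending ADDBA or DELBA flow.
 */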
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			iwl_trans_tx_agg_disable(trans(priv),
						 tid_data->agg.txq_id);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
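/*
 * The uCode packs the station id and TID of an aggregated TX response
 * into the single ra_tid field; the IWLAGN_TX_RES_{RA,TID}_MSK/POS
 * masks below pull them back apart.
 */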
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
			"got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
				   "try-count (0x%08x)\n",
				   iwl_get_agg_tx_fail_reason(fstatus),
				   fstatus & AGG_TX_STATUS_MSK,
				   fstatus & AGG_TX_TRY_MSK);
	}
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
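/*
 * The scheduler SSN is appended right after the variable-length frame
 * status array in the TX response, so index past tx_resp->frame_count
 * entries of ->status to reach it.
 */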
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp,
				 bool is_agg)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}
static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}
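/*
 * Hand the reclaim down to the transport, unless the response came from a
 * queue that no longer matches the aggregation queue recorded for this
 * station/TID (a known uCode quirk); in that case just log and skip it.
 */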
static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
		       int txq_id, int ssn, struct sk_buff_head *skbs)
{
	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
		     tid != IWL_TID_NON_QOS &&
		     txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now.
		 * Since it is can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
			"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
			txq_id, sta_id, tid,
			priv->tid_data[sta_id][tid].agg.txq_id);
		return 1;
	}

	iwl_trans_reclaim(trans(priv), txq_id, ssn, skbs);
	return 0;
}
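/*
 * TX response handler: for a single-frame response the frames up to the
 * reported SSN are reclaimed and their status pushed back to mac80211;
 * responses for aggregated frames only update the aggregation state here,
 * the actual reclaim happens from the compressed BA notification.
 */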
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock(&priv->sta_lock);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	__skb_queue_head_init(&skbs);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		/*we can free until ssn % q.n_bd not inclusive */
		WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			kmem_cache_free(iwl_tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
						    "passive channel");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			freed++;
		}

		WARN_ON(!is_agg && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	return 0;
}
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= cfg(priv)->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock(&priv->sta_lock);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (iwl_reclaim(priv, sta_id, tid, scd_flow,
			ba_resp_scd_ssn, &reclaimed_skbs)) {
		spin_unlock(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
		     ba_resp->txed, ba_resp->txed_2_done);

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	return 0;
}
);