/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */

static const u8 tid_to_ac[] = {
	/* this matches the mac80211 numbers */
	2, 3, 3, 2, 1, 1, 0, 0
};

static const u8 ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

static inline int get_fifo_from_ac(u8 ac)
{
	return ac_to_fifo[ac];
}

static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline int get_fifo_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return get_fifo_from_ac(tid_to_ac[tid]);

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

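/*
 * Illustration of the resulting mapping (derived from the tables above):
 *
 *	TID 0, 3  ->  AC 2 (BE)  ->  IWL_TX_FIFO_BE
 *	TID 1, 2  ->  AC 3 (BK)  ->  IWL_TX_FIFO_BK
 *	TID 4, 5  ->  AC 1 (VI)  ->  IWL_TX_FIFO_VI
 *	TID 6, 7  ->  AC 0 (VO)  ->  IWL_TX_FIFO_VO
 *
 * e.g. get_fifo_from_tid(6) == get_fifo_from_ac(0) == IWL_TX_FIFO_VO,
 * while TIDs 8-15 fall through to -EINVAL.
 */
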
/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				    struct iwl_tx_queue *txq,
				    u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

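/*
 * Note on the duplicate write above (derived from this function and from
 * iwlagn_txq_inval_byte_cnt_tbl() below): each byte-count entry is a 12-bit
 * length plus a 4-bit station id, and the first TFD_QUEUE_SIZE_BC_DUP slots
 * are mirrored past TFD_QUEUE_SIZE_MAX; the usual reading is that this lets
 * the scheduler see a contiguous window even when the circular buffer wraps.
 */
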
void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}

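/*
 * The queue-to-RA/TID translation table packs two 16-bit entries per 32-bit
 * word of scheduler SRAM: odd queue numbers land in the upper halfword and
 * even ones in the lower halfword, which is why the read-modify-write above
 * masks the other half before writing the new mapping back.
 */
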
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
}

void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
				struct iwl_tx_queue *txq,
				int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWLAGN_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((SCD_WIN_SIZE <<
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((SCD_FRAME_LIMIT <<
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

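/*
 * Sketch of how the enable/disable pair is driven in this file (see
 * iwlagn_tx_agg_start()/iwlagn_tx_agg_stop() further down): mac80211's ADDBA
 * path picks a free aggregation queue and calls
 * priv->cfg->ops->lib->txq_agg_enable() with the RA/TID and the starting
 * sequence number; the DELBA path waits for the queue to drain and then calls
 * ->txq_agg_disable() with the same FIFO.
 */
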
int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
			   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under priv->lock and mac access
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
}

static inline int get_queue_from_ac(u16 ac)
{
	return ac;
}

/*
 * Handle building the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
				      u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;


	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

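/*
 * Worked example of the flag logic above (derived from this function only):
 * an ACKed QoS data frame gets TX_CMD_FLG_ACK_MSK set, TX_CMD_FLG_SEQ_CTL_MSK
 * cleared, and its TID copied into tid_tspec; a BlockAckReq instead sets both
 * ACK_MSK and IMM_BA_RSP_MASK; a no-ACK frame clears ACK_MSK and sets
 * SEQ_CTL_MSK.
 */
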
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						      priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

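/*
 * Example of what the branches above produce (derived from the code): data
 * frames leave rate selection to the uCode station table via
 * TX_CMD_FLG_STA_RATE_MSK, while management frames get an explicit
 * rate_n_flags built from the lowest (or requested legacy) rate, a CCK flag
 * when the rate index falls in the CCK range, and the toggled management
 * antenna.
 */
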
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Find index into station table for destination station */
	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
	    sta_priv->asleep) {
		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock;
		}
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		}
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock;
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

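	/*
	 * Illustration of the alignment above (generic, since the struct
	 * sizes are not restated here): if the combined Tx command + command
	 * header + MAC header length is, say, 4n+2 bytes, firstlen is rounded
	 * up to 4n+4 and TX_CMD_FLG_MH_PAD_MSK tells the device to ignore the
	 * two pad bytes that now sit after the MAC header; if the length is
	 * already a multiple of 4, no flag is set.
	 */
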
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwlagn_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			if (txq_id == priv->cmd_queue)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}

/**
 * iwlagn_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwlagn_hw_txq_ctx_free(priv);

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwlagn_hw_txq_ctx_free(priv);
	iwlagn_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == priv->cmd_queue ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
	}
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}

int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * The only reason this call can fail is a queue number out of range,
	 * which can happen if the uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue; just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
					     tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}

	return 0;
}

static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwlagn_tx_status(priv, tx_info->skb);

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}

/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap, sent_bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

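/*
 * Small worked example of the bitmap handling above (numbers chosen only for
 * illustration): if agg->start_idx is 10 and the BA's starting sequence maps
 * to index 8, sh is 2, so the firmware's 64-bit ACK bitmap is shifted right
 * by two to line up with the driver's Tx window; ANDing with agg->bitmap then
 * leaves one bit per frame we actually attempted, and the loop counts the set
 * bits into "successes".
 */
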
/**
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to fill
		 * the syslog, don't enable the logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}