1053d35f RR |
1 | /****************************************************************************** |
2 | * | |
01f8162a | 3 | * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. |
1053d35f RR |
4 | * |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
759ef89f | 25 | * Intel Linux Wireless <ilw@linux.intel.com> |
1053d35f RR |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
27 | * | |
28 | *****************************************************************************/ | |
29 | ||
fd4abac5 | 30 | #include <linux/etherdevice.h> |
1053d35f RR |
31 | #include <net/mac80211.h> |
32 | #include "iwl-eeprom.h" | |
33 | #include "iwl-dev.h" | |
34 | #include "iwl-core.h" | |
35 | #include "iwl-sta.h" | |
36 | #include "iwl-io.h" | |
37 | #include "iwl-helpers.h" | |
38 | ||
30e553e3 TW |
39 | static const u16 default_tid_to_tx_fifo[] = { |
40 | IWL_TX_FIFO_AC1, | |
41 | IWL_TX_FIFO_AC0, | |
42 | IWL_TX_FIFO_AC0, | |
43 | IWL_TX_FIFO_AC1, | |
44 | IWL_TX_FIFO_AC2, | |
45 | IWL_TX_FIFO_AC2, | |
46 | IWL_TX_FIFO_AC3, | |
47 | IWL_TX_FIFO_AC3, | |
48 | IWL_TX_FIFO_NONE, | |
49 | IWL_TX_FIFO_NONE, | |
50 | IWL_TX_FIFO_NONE, | |
51 | IWL_TX_FIFO_NONE, | |
52 | IWL_TX_FIFO_NONE, | |
53 | IWL_TX_FIFO_NONE, | |
54 | IWL_TX_FIFO_NONE, | |
55 | IWL_TX_FIFO_NONE, | |
56 | IWL_TX_FIFO_AC3 | |
57 | }; | |
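/*
 * Note (illustrative reading of the table above, not a statement from the
 * original comments): the mapping appears to follow the standard 802.11e
 * UP-to-AC scheme, i.e. TIDs 0 and 3 use the best-effort FIFO (AC1),
 * TIDs 1-2 the background FIFO (AC0), TIDs 4-5 the video FIFO (AC2),
 * TIDs 6-7 the voice FIFO (AC3), and TIDs 8-15 have no FIFO assigned.
 */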
58 | ||
4ddbb7d0 TW |
59 | static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, |
60 | struct iwl_dma_ptr *ptr, size_t size) | |
61 | { | |
62 | ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma); | |
63 | if (!ptr->addr) | |
64 | return -ENOMEM; | |
65 | ptr->size = size; | |
66 | return 0; | |
67 | } | |
68 | ||
69 | static inline void iwl_free_dma_ptr(struct iwl_priv *priv, | |
70 | struct iwl_dma_ptr *ptr) | |
71 | { | |
72 | if (unlikely(!ptr->addr)) | |
73 | return; | |
74 | ||
75 | pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma); | |
76 | memset(ptr, 0, sizeof(*ptr)); | |
77 | } | |
78 | ||
fd4abac5 TW |
79 | /** |
80 | * iwl_txq_update_write_ptr - Send new write index to hardware | |
81 | */ | |
82 | int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |
83 | { | |
84 | u32 reg = 0; | |
85 | int ret = 0; | |
86 | int txq_id = txq->q.id; | |
87 | ||
88 | if (txq->need_update == 0) | |
89 | return ret; | |
90 | ||
91 | /* if we're trying to save power */ | |
92 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | |
93 | /* wake up nic if it's powered down ... | |
94 | * uCode will wake up, and interrupt us again, so next | |
95 | * time we'll skip this part. */ | |
96 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | |
97 | ||
98 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | |
e1623446 | 99 | IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg); |
fd4abac5 TW |
100 | iwl_set_bit(priv, CSR_GP_CNTRL, |
101 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | |
102 | return ret; | |
103 | } | |
104 | ||
105 | /* restore this queue's parameters in nic hardware. */ | |
106 | ret = iwl_grab_nic_access(priv); | |
107 | if (ret) | |
108 | return ret; | |
109 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, | |
110 | txq->q.write_ptr | (txq_id << 8)); | |
111 | iwl_release_nic_access(priv); | |
112 | ||
113 | /* else not in power-save mode, uCode will never sleep when we're | |
114 | * trying to tx (during RFKILL, we're not trying to tx). */ | |
115 | } else | |
116 | iwl_write32(priv, HBUS_TARG_WRPTR, | |
117 | txq->q.write_ptr | (txq_id << 8)); | |
118 | ||
119 | txq->need_update = 0; | |
120 | ||
121 | return ret; | |
122 | } | |
123 | EXPORT_SYMBOL(iwl_txq_update_write_ptr); | |
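/*
 * Example of the HBUS_TARG_WRPTR encoding used above: the low byte carries the
 * write index and the queue id is shifted into bits 8 and up, so for
 * txq_id == 4 and write_ptr == 7 the value written is (7 | (4 << 8)) == 0x0407.
 */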
124 | ||
125 | ||
1053d35f RR |
126 | /** |
127 | * iwl_tx_queue_free - Deallocate DMA queue. | |
128 | * @txq: Transmit queue to deallocate. | |
129 | * | |
130 | * Empty queue by removing and destroying all BD's. | |
131 | * Free all buffers. | |
132 | * 0-fill, but do not free "txq" descriptor structure. | |
133 | */ | |
a8e74e27 | 134 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) |
1053d35f | 135 | { |
da99c4b6 | 136 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; |
443cfd45 | 137 | struct iwl_queue *q = &txq->q; |
1053d35f | 138 | struct pci_dev *dev = priv->pci_dev; |
961ba60a | 139 | int i, len; |
1053d35f RR |
140 | |
141 | if (q->n_bd == 0) | |
142 | return; | |
143 | ||
144 | /* first, empty all BD's */ | |
145 | for (; q->write_ptr != q->read_ptr; | |
146 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) | |
7aaa1d79 | 147 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
1053d35f RR |
148 | |
149 | len = sizeof(struct iwl_cmd) * q->n_window; | |
1053d35f RR |
150 | |
151 | /* De-alloc array of command/tx buffers */ | |
961ba60a | 152 | for (i = 0; i < TFD_TX_CMD_SLOTS; i++) |
da99c4b6 | 153 | kfree(txq->cmd[i]); |
1053d35f RR |
154 | |
155 | /* De-alloc circular buffer of TFDs */ | |
156 | if (txq->q.n_bd) | |
a8e74e27 | 157 | pci_free_consistent(dev, priv->hw_params.tfd_size * |
499b1883 | 158 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); |
1053d35f RR |
159 | |
160 | /* De-alloc array of per-TFD driver data */ | |
161 | kfree(txq->txb); | |
162 | txq->txb = NULL; | |
163 | ||
164 | /* 0-fill queue descriptor structure */ | |
165 | memset(txq, 0, sizeof(*txq)); | |
166 | } | |
a8e74e27 | 167 | EXPORT_SYMBOL(iwl_tx_queue_free); |
961ba60a TW |
168 | |
169 | /** | |
170 | * iwl_cmd_queue_free - Deallocate DMA queue. | |
171 | * @txq: Transmit queue to deallocate. | |
172 | * | |
173 | * Empty queue by removing and destroying all BD's. | |
174 | * Free all buffers. | |
175 | * 0-fill, but do not free "txq" descriptor structure. | |
176 | */ | |
3e5d238f | 177 | void iwl_cmd_queue_free(struct iwl_priv *priv) |
961ba60a TW |
178 | { |
179 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | |
180 | struct iwl_queue *q = &txq->q; | |
181 | struct pci_dev *dev = priv->pci_dev; | |
182 | int i, len; | |
183 | ||
184 | if (q->n_bd == 0) | |
185 | return; | |
186 | ||
187 | len = sizeof(struct iwl_cmd) * q->n_window; | |
188 | len += IWL_MAX_SCAN_SIZE; | |
189 | ||
190 | /* De-alloc array of command/tx buffers */ | |
191 | for (i = 0; i <= TFD_CMD_SLOTS; i++) | |
192 | kfree(txq->cmd[i]); | |
193 | ||
194 | /* De-alloc circular buffer of TFDs */ | |
195 | if (txq->q.n_bd) | |
3e5d238f | 196 | pci_free_consistent(dev, priv->hw_params.tfd_size * |
499b1883 | 197 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); |
961ba60a TW |
198 | |
199 | /* 0-fill queue descriptor structure */ | |
200 | memset(txq, 0, sizeof(*txq)); | |
201 | } | |
3e5d238f AK |
202 | EXPORT_SYMBOL(iwl_cmd_queue_free); |
203 | ||
fd4abac5 TW |
204 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
205 | * DMA services | |
206 | * | |
207 | * Theory of operation | |
208 | * | |
209 | * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
210 | * of buffer descriptors, each of which points to one or more data buffers for | |
211 | * the device to read from or fill. Driver and device exchange status of each | |
212 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | |
213 | * entries in each circular buffer, to protect against confusing empty and full | |
214 | * queue states. | |
215 | * | |
216 | * The device reads or writes the data in the queues via the device's several | |
217 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | |
218 | * | |
219 | * For Tx queues, there are low mark and high mark limits. If, after queuing
220 | * a packet for Tx, the free space becomes less than the low mark, the Tx
221 | * queue is stopped. When reclaiming packets (on a 'tx done' IRQ), if the
222 | * free space becomes greater than the high mark, the Tx queue is resumed.
223 | * | |
224 | * See more detailed info in iwl-4965-hw.h. | |
225 | ***************************************************/ | |
226 | ||
227 | int iwl_queue_space(const struct iwl_queue *q) | |
228 | { | |
229 | int s = q->read_ptr - q->write_ptr; | |
230 | ||
231 | if (q->read_ptr > q->write_ptr) | |
232 | s -= q->n_bd; | |
233 | ||
234 | if (s <= 0) | |
235 | s += q->n_window; | |
236 | /* keep some reserve to not confuse empty and full situations */ | |
237 | s -= 2; | |
238 | if (s < 0) | |
239 | s = 0; | |
240 | return s; | |
241 | } | |
242 | EXPORT_SYMBOL(iwl_queue_space); | |
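/*
 * Worked example for iwl_queue_space(): with n_bd = 256, n_window = 64,
 * read_ptr = 8 and write_ptr = 10 (two entries in flight), s starts at
 * 8 - 10 = -2, is raised by n_window to 62, and the 2-entry reserve leaves
 * a reported free space of 60.
 */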
243 | ||
244 | ||
1053d35f RR |
245 | /** |
246 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | |
247 | */ | |
443cfd45 | 248 | static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, |
1053d35f RR |
249 | int count, int slots_num, u32 id) |
250 | { | |
251 | q->n_bd = count; | |
252 | q->n_window = slots_num; | |
253 | q->id = id; | |
254 | ||
255 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | |
256 | * and iwl_queue_dec_wrap are broken. */ | |
257 | BUG_ON(!is_power_of_2(count)); | |
258 | ||
259 | /* slots_num must be power-of-two size, otherwise | |
260 | * get_cmd_index is broken. */ | |
261 | BUG_ON(!is_power_of_2(slots_num)); | |
262 | ||
263 | q->low_mark = q->n_window / 4; | |
264 | if (q->low_mark < 4) | |
265 | q->low_mark = 4; | |
266 | ||
267 | q->high_mark = q->n_window / 8; | |
268 | if (q->high_mark < 2) | |
269 | q->high_mark = 2; | |
270 | ||
271 | q->write_ptr = q->read_ptr = 0; | |
272 | ||
273 | return 0; | |
274 | } | |
275 | ||
276 | /** | |
277 | * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue | |
278 | */ | |
279 | static int iwl_tx_queue_alloc(struct iwl_priv *priv, | |
16466903 | 280 | struct iwl_tx_queue *txq, u32 id) |
1053d35f RR |
281 | { |
282 | struct pci_dev *dev = priv->pci_dev; | |
3978e5bc | 283 | size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; |
1053d35f RR |
284 | |
285 | /* Driver private data, only for Tx (not command) queues, | |
286 | * not shared with device. */ | |
287 | if (id != IWL_CMD_QUEUE_NUM) { | |
288 | txq->txb = kmalloc(sizeof(txq->txb[0]) * | |
289 | TFD_QUEUE_SIZE_MAX, GFP_KERNEL); | |
290 | if (!txq->txb) { | |
15b1687c | 291 | IWL_ERR(priv, "kmalloc for auxiliary BD " |
1053d35f RR |
292 | "structures failed\n"); |
293 | goto error; | |
294 | } | |
3978e5bc | 295 | } else { |
1053d35f | 296 | txq->txb = NULL; |
3978e5bc | 297 | } |
1053d35f RR |
298 | |
299 | /* Circular buffer of transmit frame descriptors (TFDs), | |
300 | * shared with device */ | |
3978e5bc | 301 | txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr); |
1053d35f | 302 | |
499b1883 | 303 | if (!txq->tfds) { |
3978e5bc | 304 | IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); |
1053d35f RR |
305 | goto error; |
306 | } | |
307 | txq->q.id = id; | |
308 | ||
309 | return 0; | |
310 | ||
311 | error: | |
312 | kfree(txq->txb); | |
313 | txq->txb = NULL; | |
314 | ||
315 | return -ENOMEM; | |
316 | } | |
317 | ||
1053d35f RR |
318 | /** |
319 | * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue | |
320 | */ | |
a8e74e27 SO |
321 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
322 | int slots_num, u32 txq_id) | |
1053d35f | 323 | { |
da99c4b6 | 324 | int i, len; |
73b7d742 | 325 | int ret; |
1053d35f RR |
326 | |
327 | /* | |
328 | * Alloc buffer array for commands (Tx or other types of commands). | |
329 | * For the command queue (#4), allocate command space + one big | |
330 | * command for scan, since the scan command is very large; the system will
331 | * not have two scans at the same time, so only one is needed. | |
332 | * For normal Tx queues (all other queues), no super-size command | |
333 | * space is needed. | |
334 | */ | |
da99c4b6 GG |
335 | len = sizeof(struct iwl_cmd); |
336 | for (i = 0; i <= slots_num; i++) { | |
337 | if (i == slots_num) { | |
338 | if (txq_id == IWL_CMD_QUEUE_NUM) | |
339 | len += IWL_MAX_SCAN_SIZE; | |
340 | else | |
341 | continue; | |
342 | } | |
343 | ||
49898852 | 344 | txq->cmd[i] = kmalloc(len, GFP_KERNEL); |
da99c4b6 | 345 | if (!txq->cmd[i]) |
73b7d742 | 346 | goto err; |
da99c4b6 | 347 | } |
1053d35f RR |
348 | |
349 | /* Alloc driver data array and TFD circular buffer */ | |
73b7d742 TW |
350 | ret = iwl_tx_queue_alloc(priv, txq, txq_id); |
351 | if (ret) | |
352 | goto err; | |
1053d35f | 353 | |
1053d35f RR |
354 | txq->need_update = 0; |
355 | ||
356 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | |
357 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | |
358 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | |
359 | ||
360 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | |
361 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | |
362 | ||
363 | /* Tell device where to find queue */ | |
a8e74e27 | 364 | priv->cfg->ops->lib->txq_init(priv, txq); |
1053d35f RR |
365 | |
366 | return 0; | |
73b7d742 TW |
367 | err: |
368 | for (i = 0; i < slots_num; i++) { | |
369 | kfree(txq->cmd[i]); | |
370 | txq->cmd[i] = NULL; | |
371 | } | |
372 | ||
373 | if (txq_id == IWL_CMD_QUEUE_NUM) { | |
374 | kfree(txq->cmd[slots_num]); | |
375 | txq->cmd[slots_num] = NULL; | |
376 | } | |
377 | return -ENOMEM; | |
1053d35f | 378 | } |
a8e74e27 SO |
379 | EXPORT_SYMBOL(iwl_tx_queue_init); |
380 | ||
da1bc453 TW |
381 | /** |
382 | * iwl_hw_txq_ctx_free - Free TXQ Context | |
383 | * | |
384 | * Destroy all TX DMA queues and structures | |
385 | */ | |
386 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | |
387 | { | |
388 | int txq_id; | |
389 | ||
390 | /* Tx queues */ | |
391 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | |
961ba60a TW |
392 | if (txq_id == IWL_CMD_QUEUE_NUM) |
393 | iwl_cmd_queue_free(priv); | |
394 | else | |
395 | iwl_tx_queue_free(priv, txq_id); | |
da1bc453 | 396 | |
4ddbb7d0 TW |
397 | iwl_free_dma_ptr(priv, &priv->kw); |
398 | ||
399 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | |
da1bc453 TW |
400 | } |
401 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | |
402 | ||
1053d35f RR |
403 | /** |
404 | * iwl_txq_ctx_reset - Reset TX queue context | |
a96a27f9 | 405 | * Destroys all DMA structures and initializes them again
1053d35f RR |
406 | * |
407 | * @param priv | |
408 | * @return error code | |
409 | */ | |
410 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | |
411 | { | |
412 | int ret = 0; | |
413 | int txq_id, slots_num; | |
da1bc453 | 414 | unsigned long flags; |
1053d35f | 415 | |
1053d35f RR |
416 | /* Free all tx/cmd queues and keep-warm buffer */ |
417 | iwl_hw_txq_ctx_free(priv); | |
418 | ||
4ddbb7d0 TW |
419 | ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, |
420 | priv->hw_params.scd_bc_tbls_size); | |
421 | if (ret) { | |
15b1687c | 422 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); |
4ddbb7d0 TW |
423 | goto error_bc_tbls; |
424 | } | |
1053d35f | 425 | /* Alloc keep-warm buffer */ |
4ddbb7d0 | 426 | ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); |
1053d35f | 427 | if (ret) { |
15b1687c | 428 | IWL_ERR(priv, "Keep Warm allocation failed\n"); |
1053d35f RR |
429 | goto error_kw; |
430 | } | |
da1bc453 TW |
431 | spin_lock_irqsave(&priv->lock, flags); |
432 | ret = iwl_grab_nic_access(priv); | |
433 | if (unlikely(ret)) { | |
434 | spin_unlock_irqrestore(&priv->lock, flags); | |
435 | goto error_reset; | |
436 | } | |
1053d35f RR |
437 | |
438 | /* Turn off all Tx DMA fifos */ | |
da1bc453 TW |
439 | priv->cfg->ops->lib->txq_set_sched(priv, 0); |
440 | ||
4ddbb7d0 TW |
441 | /* Tell NIC where to find the "keep warm" buffer */ |
442 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | |
443 | ||
da1bc453 TW |
444 | iwl_release_nic_access(priv); |
445 | spin_unlock_irqrestore(&priv->lock, flags); | |
446 | ||
da1bc453 | 447 | /* Alloc and init all Tx queues, including the command queue (#4) */ |
1053d35f RR |
448 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { |
449 | slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? | |
450 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | |
451 | ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, | |
452 | txq_id); | |
453 | if (ret) { | |
15b1687c | 454 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); |
1053d35f RR |
455 | goto error; |
456 | } | |
457 | } | |
458 | ||
459 | return ret; | |
460 | ||
461 | error: | |
462 | iwl_hw_txq_ctx_free(priv); | |
463 | error_reset: | |
4ddbb7d0 | 464 | iwl_free_dma_ptr(priv, &priv->kw); |
1053d35f | 465 | error_kw: |
4ddbb7d0 TW |
466 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); |
467 | error_bc_tbls: | |
1053d35f RR |
468 | return ret; |
469 | } | |
a33c2f47 | 470 | |
da1bc453 TW |
471 | /** |
472 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | |
473 | */ | |
474 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | |
475 | { | |
f3f911d1 | 476 | int ch; |
da1bc453 TW |
477 | unsigned long flags; |
478 | ||
da1bc453 TW |
479 | /* Turn off all Tx DMA fifos */ |
480 | spin_lock_irqsave(&priv->lock, flags); | |
481 | if (iwl_grab_nic_access(priv)) { | |
482 | spin_unlock_irqrestore(&priv->lock, flags); | |
483 | return; | |
484 | } | |
485 | ||
486 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | |
487 | ||
488 | /* Stop each Tx DMA channel, and wait for it to be idle */ | |
f3f911d1 ZY |
489 | for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { |
490 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | |
da1bc453 | 491 | iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, |
f3f911d1 | 492 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), |
f056658b | 493 | 1000); |
da1bc453 TW |
494 | } |
495 | iwl_release_nic_access(priv); | |
496 | spin_unlock_irqrestore(&priv->lock, flags); | |
497 | ||
498 | /* Deallocate memory for all Tx queues */ | |
499 | iwl_hw_txq_ctx_free(priv); | |
500 | } | |
501 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | |
fd4abac5 TW |
502 | |
503 | /* | |
504 | * Handle building the REPLY_TX command.
505 | */ | |
506 | static void iwl_tx_cmd_build_basic(struct iwl_priv *priv, | |
507 | struct iwl_tx_cmd *tx_cmd, | |
e039fa4a | 508 | struct ieee80211_tx_info *info, |
fd4abac5 | 509 | struct ieee80211_hdr *hdr, |
0e7690f1 | 510 | u8 std_id) |
fd4abac5 | 511 | { |
fd7c8a40 | 512 | __le16 fc = hdr->frame_control; |
fd4abac5 TW |
513 | __le32 tx_flags = tx_cmd->tx_flags; |
514 | ||
515 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | |
e039fa4a | 516 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { |
fd4abac5 | 517 | tx_flags |= TX_CMD_FLG_ACK_MSK; |
fd7c8a40 | 518 | if (ieee80211_is_mgmt(fc)) |
fd4abac5 | 519 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; |
fd7c8a40 | 520 | if (ieee80211_is_probe_resp(fc) && |
fd4abac5 TW |
521 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) |
522 | tx_flags |= TX_CMD_FLG_TSF_MSK; | |
523 | } else { | |
524 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | |
525 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | |
526 | } | |
527 | ||
fd7c8a40 | 528 | if (ieee80211_is_back_req(fc)) |
fd4abac5 TW |
529 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; |
530 | ||
531 | ||
532 | tx_cmd->sta_id = std_id; | |
8b7b1e05 | 533 | if (ieee80211_has_morefrags(fc)) |
fd4abac5 TW |
534 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; |
535 | ||
fd7c8a40 HH |
536 | if (ieee80211_is_data_qos(fc)) { |
537 | u8 *qc = ieee80211_get_qos_ctl(hdr); | |
fd4abac5 TW |
538 | tx_cmd->tid_tspec = qc[0] & 0xf; |
539 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | |
540 | } else { | |
541 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | |
542 | } | |
543 | ||
a326a5d0 | 544 | priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); |
fd4abac5 TW |
545 | |
546 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | |
547 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | |
548 | ||
549 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | |
fd7c8a40 HH |
550 | if (ieee80211_is_mgmt(fc)) { |
551 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | |
fd4abac5 TW |
552 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); |
553 | else | |
554 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | |
555 | } else { | |
556 | tx_cmd->timeout.pm_frame_timeout = 0; | |
557 | } | |
558 | ||
559 | tx_cmd->driver_txop = 0; | |
560 | tx_cmd->tx_flags = tx_flags; | |
561 | tx_cmd->next_frame_len = 0; | |
562 | } | |
563 | ||
564 | #define RTS_HCCA_RETRY_LIMIT 3 | |
565 | #define RTS_DFAULT_RETRY_LIMIT 60 | |
566 | ||
567 | static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, | |
568 | struct iwl_tx_cmd *tx_cmd, | |
e039fa4a | 569 | struct ieee80211_tx_info *info, |
fd7c8a40 | 570 | __le16 fc, int sta_id, |
fd4abac5 TW |
571 | int is_hcca) |
572 | { | |
76eff18b TW |
573 | u32 rate_flags = 0; |
574 | int rate_idx; | |
fd4abac5 TW |
575 | u8 rts_retry_limit = 0; |
576 | u8 data_retry_limit = 0; | |
577 | u8 rate_plcp; | |
2e92e6f2 | 578 | |
e039fa4a | 579 | rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff, |
2e92e6f2 | 580 | IWL_RATE_COUNT - 1); |
fd4abac5 TW |
581 | |
582 | rate_plcp = iwl_rates[rate_idx].plcp; | |
583 | ||
584 | rts_retry_limit = (is_hcca) ? | |
585 | RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT; | |
586 | ||
587 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | |
588 | rate_flags |= RATE_MCS_CCK_MSK; | |
589 | ||
590 | ||
fd7c8a40 | 591 | if (ieee80211_is_probe_resp(fc)) { |
fd4abac5 TW |
592 | data_retry_limit = 3; |
593 | if (data_retry_limit < rts_retry_limit) | |
594 | rts_retry_limit = data_retry_limit; | |
595 | } else | |
596 | data_retry_limit = IWL_DEFAULT_TX_RETRY; | |
597 | ||
598 | if (priv->data_retry_limit != -1) | |
599 | data_retry_limit = priv->data_retry_limit; | |
600 | ||
601 | ||
602 | if (ieee80211_is_data(fc)) { | |
603 | tx_cmd->initial_rate_index = 0; | |
604 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | |
605 | } else { | |
fd7c8a40 HH |
606 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { |
607 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | |
608 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | |
609 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): | |
610 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): | |
fd4abac5 TW |
611 | if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { |
612 | tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | |
613 | tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; | |
614 | } | |
615 | break; | |
616 | default: | |
617 | break; | |
618 | } | |
619 | ||
76eff18b TW |
620 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); |
621 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); | |
fd4abac5 TW |
622 | } |
623 | ||
624 | tx_cmd->rts_retry_limit = rts_retry_limit; | |
625 | tx_cmd->data_retry_limit = data_retry_limit; | |
e7d326ac | 626 | tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); |
fd4abac5 TW |
627 | } |
628 | ||
629 | static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv, | |
e039fa4a | 630 | struct ieee80211_tx_info *info, |
fd4abac5 TW |
631 | struct iwl_tx_cmd *tx_cmd, |
632 | struct sk_buff *skb_frag, | |
633 | int sta_id) | |
634 | { | |
e039fa4a | 635 | struct ieee80211_key_conf *keyconf = info->control.hw_key; |
fd4abac5 | 636 | |
ccc038ab | 637 | switch (keyconf->alg) { |
fd4abac5 TW |
638 | case ALG_CCMP: |
639 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | |
ccc038ab | 640 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); |
e039fa4a | 641 | if (info->flags & IEEE80211_TX_CTL_AMPDU) |
fd4abac5 | 642 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; |
e1623446 | 643 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); |
fd4abac5 TW |
644 | break; |
645 | ||
646 | case ALG_TKIP: | |
647 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | |
ccc038ab | 648 | ieee80211_get_tkip_key(keyconf, skb_frag, |
fd4abac5 | 649 | IEEE80211_TKIP_P2_KEY, tx_cmd->key); |
e1623446 | 650 | IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); |
fd4abac5 TW |
651 | break; |
652 | ||
653 | case ALG_WEP: | |
fd4abac5 | 654 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | |
ccc038ab EG |
655 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); |
656 | ||
657 | if (keyconf->keylen == WEP_KEY_LEN_128) | |
658 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | |
659 | ||
660 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | |
fd4abac5 | 661 | |
e1623446 | 662 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " |
ccc038ab | 663 | "with key %d\n", keyconf->keyidx); |
fd4abac5 TW |
664 | break; |
665 | ||
666 | default: | |
978785a3 | 667 | IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); |
fd4abac5 TW |
668 | break; |
669 | } | |
670 | } | |
671 | ||
672 | static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len) | |
673 | { | |
674 | /* 0 - mgmt, 1 - ctl, 2 - data */
675 | int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2; | |
676 | priv->tx_stats[idx].cnt++; | |
677 | priv->tx_stats[idx].bytes += len; | |
678 | } | |
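/*
 * Example of the idx computation above: IEEE80211_FCTL_FTYPE masks bits 2-3 of
 * the frame control field, so management (0x00), control (0x04) and data
 * (0x08) frames yield idx values of 0, 1 and 2 respectively after the shift.
 */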
679 | ||
680 | /* | |
681 | * start REPLY_TX command process | |
682 | */ | |
e039fa4a | 683 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) |
fd4abac5 TW |
684 | { |
685 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | |
e039fa4a | 686 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
f3674227 TW |
687 | struct iwl_tx_queue *txq; |
688 | struct iwl_queue *q; | |
689 | struct iwl_cmd *out_cmd; | |
690 | struct iwl_tx_cmd *tx_cmd; | |
691 | int swq_id, txq_id; | |
fd4abac5 TW |
692 | dma_addr_t phys_addr; |
693 | dma_addr_t txcmd_phys; | |
694 | dma_addr_t scratch_phys; | |
b88b15df | 695 | u16 len, len_org; |
fd4abac5 | 696 | u16 seq_number = 0; |
fd7c8a40 | 697 | __le16 fc; |
0e7690f1 | 698 | u8 hdr_len; |
f3674227 | 699 | u8 sta_id; |
fd4abac5 TW |
700 | u8 wait_write_ptr = 0; |
701 | u8 tid = 0; | |
702 | u8 *qc = NULL; | |
703 | unsigned long flags; | |
704 | int ret; | |
705 | ||
706 | spin_lock_irqsave(&priv->lock, flags); | |
707 | if (iwl_is_rfkill(priv)) { | |
e1623446 | 708 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); |
fd4abac5 TW |
709 | goto drop_unlock; |
710 | } | |
711 | ||
e039fa4a | 712 | if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == |
2e92e6f2 | 713 | IWL_INVALID_RATE) { |
15b1687c | 714 | IWL_ERR(priv, "ERROR: No TX rate available.\n"); |
fd4abac5 TW |
715 | goto drop_unlock; |
716 | } | |
717 | ||
fd7c8a40 | 718 | fc = hdr->frame_control; |
fd4abac5 TW |
719 | |
720 | #ifdef CONFIG_IWLWIFI_DEBUG | |
721 | if (ieee80211_is_auth(fc)) | |
e1623446 | 722 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); |
fd7c8a40 | 723 | else if (ieee80211_is_assoc_req(fc)) |
e1623446 | 724 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); |
fd7c8a40 | 725 | else if (ieee80211_is_reassoc_req(fc)) |
e1623446 | 726 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); |
fd4abac5 TW |
727 | #endif |
728 | ||
729 | /* drop all data frames if we are not associated */
fd7c8a40 | 730 | if (ieee80211_is_data(fc) && |
05c914fe | 731 | (priv->iw_mode != NL80211_IFTYPE_MONITOR || |
d10c4ec8 SG |
732 | !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */ |
733 | (!iwl_is_associated(priv) || | |
05c914fe | 734 | ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) || |
d10c4ec8 | 735 | !priv->assoc_station_added)) { |
e1623446 | 736 | IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n"); |
fd4abac5 TW |
737 | goto drop_unlock; |
738 | } | |
739 | ||
740 | spin_unlock_irqrestore(&priv->lock, flags); | |
741 | ||
7294ec95 | 742 | hdr_len = ieee80211_hdrlen(fc); |
fd4abac5 TW |
743 | |
744 | /* Find (or create) index into station table for destination station */ | |
745 | sta_id = iwl_get_sta_id(priv, hdr); | |
746 | if (sta_id == IWL_INVALID_STATION) { | |
e1623446 | 747 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", |
e174961c | 748 | hdr->addr1); |
fd4abac5 TW |
749 | goto drop; |
750 | } | |
751 | ||
e1623446 | 752 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); |
fd4abac5 | 753 | |
f3674227 TW |
754 | swq_id = skb_get_queue_mapping(skb); |
755 | txq_id = swq_id; | |
fd7c8a40 HH |
756 | if (ieee80211_is_data_qos(fc)) { |
757 | qc = ieee80211_get_qos_ctl(hdr); | |
7294ec95 | 758 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; |
f3674227 TW |
759 | seq_number = priv->stations[sta_id].tid[tid].seq_number; |
760 | seq_number &= IEEE80211_SCTL_SEQ; | |
761 | hdr->seq_ctrl = hdr->seq_ctrl & | |
c1b4aa3f | 762 | cpu_to_le16(IEEE80211_SCTL_FRAG); |
f3674227 | 763 | hdr->seq_ctrl |= cpu_to_le16(seq_number); |
fd4abac5 | 764 | seq_number += 0x10; |
fd4abac5 | 765 | /* aggregation is on for this <sta,tid> */ |
e4e72fb4 | 766 | if (info->flags & IEEE80211_TX_CTL_AMPDU) { |
fd4abac5 | 767 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; |
e4e72fb4 JB |
768 | swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); |
769 | } | |
fd4abac5 | 770 | priv->stations[sta_id].tid[tid].tfds_in_queue++; |
fd4abac5 TW |
771 | } |
772 | ||
fd4abac5 TW |
773 | txq = &priv->txq[txq_id]; |
774 | q = &txq->q; | |
3fd07a1e | 775 | txq->swq_id = swq_id; |
fd4abac5 TW |
776 | |
777 | spin_lock_irqsave(&priv->lock, flags); | |
778 | ||
fd4abac5 TW |
779 | /* Set up driver data for this TFD */ |
780 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | |
781 | txq->txb[q->write_ptr].skb[0] = skb; | |
fd4abac5 TW |
782 | |
783 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | |
b88b15df | 784 | out_cmd = txq->cmd[q->write_ptr]; |
fd4abac5 TW |
785 | tx_cmd = &out_cmd->cmd.tx; |
786 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | |
787 | memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); | |
788 | ||
789 | /* | |
790 | * Set up the Tx-command (not MAC!) header. | |
791 | * Store the chosen Tx queue and TFD index within the sequence field; | |
792 | * after Tx, uCode's Tx response will return this value so driver can | |
793 | * locate the frame within the tx queue and do post-tx processing. | |
794 | */ | |
795 | out_cmd->hdr.cmd = REPLY_TX; | |
796 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | |
797 | INDEX_TO_SEQ(q->write_ptr))); | |
798 | ||
799 | /* Copy MAC header from skb into command buffer */ | |
800 | memcpy(tx_cmd->hdr, hdr, hdr_len); | |
801 | ||
802 | /* | |
803 | * Use the first empty entry in this queue's command buffer array | |
804 | * to contain the Tx command and MAC header concatenated together | |
805 | * (payload data will be in another buffer). | |
806 | * Size of this varies, due to varying MAC header length. | |
807 | * If end is not dword aligned, we'll have 2 extra bytes at the end | |
808 | * of the MAC header (device reads on dword boundaries). | |
809 | * We'll tell device about this padding later. | |
810 | */ | |
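/*
 * For example (illustrative numbers only): if the Tx command header plus the
 * MAC header come to 62 bytes, (len + 3) & ~3 below rounds the length up to
 * 64, and len_org is then used as a flag noting that 2 bytes of padding
 * follow the MAC header.
 */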
811 | len = sizeof(struct iwl_tx_cmd) + | |
812 | sizeof(struct iwl_cmd_header) + hdr_len; | |
813 | ||
814 | len_org = len; | |
815 | len = (len + 3) & ~3; | |
816 | ||
817 | if (len_org != len) | |
818 | len_org = 1; | |
819 | else | |
820 | len_org = 0; | |
821 | ||
822 | /* Physical address of this Tx command's header (not MAC header!), | |
823 | * within command buffer array. */ | |
499b1883 TW |
824 | txcmd_phys = pci_map_single(priv->pci_dev, |
825 | out_cmd, sizeof(struct iwl_cmd), | |
96891cee | 826 | PCI_DMA_BIDIRECTIONAL); |
499b1883 TW |
827 | pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys); |
828 | pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd)); | |
fd4abac5 TW |
829 | /* Add buffer containing Tx command and MAC(!) header to TFD's |
830 | * first entry */ | |
499b1883 | 831 | txcmd_phys += offsetof(struct iwl_cmd, hdr); |
7aaa1d79 SO |
832 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
833 | txcmd_phys, len, 1, 0); | |
fd4abac5 | 834 | |
d0f09804 | 835 | if (info->control.hw_key) |
e039fa4a | 836 | iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); |
fd4abac5 TW |
837 | |
838 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | |
839 | * if any (802.11 null frames have no payload). */ | |
840 | len = skb->len - hdr_len; | |
841 | if (len) { | |
842 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | |
843 | len, PCI_DMA_TODEVICE); | |
7aaa1d79 SO |
844 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
845 | phys_addr, len, | |
846 | 0, 0); | |
fd4abac5 TW |
847 | } |
848 | ||
849 | /* Tell NIC about any 2-byte padding after MAC header */ | |
850 | if (len_org) | |
851 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | |
852 | ||
853 | /* Total # bytes to be transmitted */ | |
854 | len = (u16)skb->len; | |
855 | tx_cmd->len = cpu_to_le16(len); | |
856 | /* TODO need this for burst mode later on */ | |
0e7690f1 | 857 | iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); |
fd4abac5 TW |
858 | |
859 | /* set is_hcca to 0; it probably will never be implemented */ | |
e039fa4a | 860 | iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0); |
fd4abac5 | 861 | |
fd7c8a40 | 862 | iwl_update_tx_stats(priv, le16_to_cpu(fc), len); |
fd4abac5 TW |
863 | |
864 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | |
865 | offsetof(struct iwl_tx_cmd, scratch); | |
866 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | |
499b1883 | 867 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); |
fd4abac5 | 868 | |
8b7b1e05 | 869 | if (!ieee80211_has_morefrags(hdr->frame_control)) { |
fd4abac5 TW |
870 | txq->need_update = 1; |
871 | if (qc) | |
872 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | |
873 | } else { | |
874 | wait_write_ptr = 1; | |
875 | txq->need_update = 0; | |
876 | } | |
877 | ||
878 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | |
879 | ||
880 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | |
881 | ||
882 | /* Set up entry for this TFD in Tx byte-count array */ | |
883 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len); | |
884 | ||
885 | /* Tell device the write index *just past* this latest filled TFD */ | |
886 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | |
887 | ret = iwl_txq_update_write_ptr(priv, txq); | |
888 | spin_unlock_irqrestore(&priv->lock, flags); | |
889 | ||
890 | if (ret) | |
891 | return ret; | |
892 | ||
143b09ef | 893 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { |
fd4abac5 TW |
894 | if (wait_write_ptr) { |
895 | spin_lock_irqsave(&priv->lock, flags); | |
896 | txq->need_update = 1; | |
897 | iwl_txq_update_write_ptr(priv, txq); | |
898 | spin_unlock_irqrestore(&priv->lock, flags); | |
143b09ef | 899 | } else { |
e4e72fb4 | 900 | iwl_stop_queue(priv, txq->swq_id); |
fd4abac5 | 901 | } |
fd4abac5 TW |
902 | } |
903 | ||
904 | return 0; | |
905 | ||
906 | drop_unlock: | |
907 | spin_unlock_irqrestore(&priv->lock, flags); | |
908 | drop: | |
909 | return -1; | |
910 | } | |
911 | EXPORT_SYMBOL(iwl_tx_skb); | |
912 | ||
913 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | |
914 | ||
915 | /** | |
916 | * iwl_enqueue_hcmd - enqueue a uCode command | |
917 | * @priv: device private data pointer
918 | * @cmd: a pointer to the uCode command structure
919 | *
920 | * The function returns < 0 values to indicate that the operation
921 | * failed. On success, it returns the index (> 0) of the command in the
922 | * command queue.
923 | */ | |
924 | int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |
925 | { | |
926 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | |
927 | struct iwl_queue *q = &txq->q; | |
fd4abac5 | 928 | struct iwl_cmd *out_cmd; |
fd4abac5 | 929 | dma_addr_t phys_addr; |
fd4abac5 | 930 | unsigned long flags; |
f3674227 TW |
931 | int len, ret; |
932 | u32 idx; | |
933 | u16 fix_size; | |
fd4abac5 TW |
934 | |
935 | cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); | |
936 | fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); | |
937 | ||
938 | /* If any of the command structures end up being larger than | |
939 | * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
940 | * we will need to increase the size of the TFD entries */ | |
941 | BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && | |
942 | !(cmd->meta.flags & CMD_SIZE_HUGE)); | |
943 | ||
944 | if (iwl_is_rfkill(priv)) { | |
e1623446 | 945 | IWL_DEBUG_INFO(priv, "Not sending command - RF KILL"); |
fd4abac5 TW |
946 | return -EIO; |
947 | } | |
948 | ||
949 | if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { | |
15b1687c | 950 | IWL_ERR(priv, "No space for Tx\n"); |
fd4abac5 TW |
951 | return -ENOSPC; |
952 | } | |
953 | ||
954 | spin_lock_irqsave(&priv->hcmd_lock, flags); | |
955 | ||
fd4abac5 | 956 | idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE); |
da99c4b6 | 957 | out_cmd = txq->cmd[idx]; |
fd4abac5 TW |
958 | |
959 | out_cmd->hdr.cmd = cmd->id; | |
960 | memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); | |
961 | memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); | |
962 | ||
963 | /* At this point, the out_cmd now has all of the incoming cmd | |
964 | * information */ | |
965 | ||
966 | out_cmd->hdr.flags = 0; | |
967 | out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | | |
968 | INDEX_TO_SEQ(q->write_ptr)); | |
969 | if (out_cmd->meta.flags & CMD_SIZE_HUGE) | |
9734cb23 | 970 | out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; |
da99c4b6 GG |
971 | len = (idx == TFD_CMD_SLOTS) ? |
972 | IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); | |
499b1883 TW |
973 | |
974 | phys_addr = pci_map_single(priv->pci_dev, out_cmd, | |
96891cee | 975 | len, PCI_DMA_BIDIRECTIONAL); |
499b1883 TW |
976 | pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr); |
977 | pci_unmap_len_set(&out_cmd->meta, len, len); | |
da99c4b6 | 978 | phys_addr += offsetof(struct iwl_cmd, hdr); |
499b1883 | 979 | |
7aaa1d79 | 980 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, |
518099a8 SO |
981 | phys_addr, fix_size, 1, |
982 | U32_PAD(cmd->len)); | |
fd4abac5 | 983 | |
ded2ae7c EK |
984 | #ifdef CONFIG_IWLWIFI_DEBUG |
985 | switch (out_cmd->hdr.cmd) { | |
986 | case REPLY_TX_LINK_QUALITY_CMD: | |
987 | case SENSITIVITY_CMD: | |
e1623446 | 988 | IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, " |
ded2ae7c EK |
989 | "%d bytes at %d[%d]:%d\n", |
990 | get_cmd_string(out_cmd->hdr.cmd), | |
991 | out_cmd->hdr.cmd, | |
992 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, | |
993 | q->write_ptr, idx, IWL_CMD_QUEUE_NUM); | |
994 | break; | |
995 | default: | |
e1623446 | 996 | IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " |
ded2ae7c EK |
997 | "%d bytes at %d[%d]:%d\n", |
998 | get_cmd_string(out_cmd->hdr.cmd), | |
999 | out_cmd->hdr.cmd, | |
1000 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, | |
1001 | q->write_ptr, idx, IWL_CMD_QUEUE_NUM); | |
1002 | } | |
1003 | #endif | |
fd4abac5 TW |
1004 | txq->need_update = 1; |
1005 | ||
518099a8 SO |
1006 | if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) |
1007 | /* Set up entry in queue's byte count circular buffer */ | |
1008 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); | |
fd4abac5 TW |
1009 | |
1010 | /* Increment and update queue's write index */ | |
1011 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | |
1012 | ret = iwl_txq_update_write_ptr(priv, txq); | |
1013 | ||
1014 | spin_unlock_irqrestore(&priv->hcmd_lock, flags); | |
1015 | return ret ? ret : idx; | |
1016 | } | |
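/*
 * Minimal usage sketch for iwl_enqueue_hcmd() (illustrative only: the command
 * id and payload below are assumptions, and callers normally reach this code
 * through the iwl_send_cmd*() helpers rather than calling it directly):
 *
 *	struct iwl_statistics_cmd stats_cmd = { .configuration_flags = 0 };
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_STATISTICS_CMD,
 *		.len = sizeof(stats_cmd),
 *		.data = &stats_cmd,
 *	};
 *	int idx = iwl_enqueue_hcmd(priv, &cmd);
 *	if (idx < 0)
 *		return idx;
 */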
1017 | ||
17b88929 TW |
1018 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) |
1019 | { | |
1020 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | |
1021 | struct iwl_queue *q = &txq->q; | |
1022 | struct iwl_tx_info *tx_info; | |
1023 | int nfreed = 0; | |
1024 | ||
1025 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | |
15b1687c | 1026 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
17b88929 TW |
1027 | "is out of range [0-%d] %d %d.\n", txq_id, |
1028 | index, q->n_bd, q->write_ptr, q->read_ptr); | |
1029 | return 0; | |
1030 | } | |
1031 | ||
499b1883 TW |
1032 | for (index = iwl_queue_inc_wrap(index, q->n_bd); |
1033 | q->read_ptr != index; | |
1034 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | |
17b88929 TW |
1035 | |
1036 | tx_info = &txq->txb[txq->q.read_ptr]; | |
1037 | ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); | |
1038 | tx_info->skb[0] = NULL; | |
17b88929 | 1039 | |
972cf447 TW |
1040 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) |
1041 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | |
1042 | ||
7aaa1d79 | 1043 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); |
17b88929 TW |
1044 | nfreed++; |
1045 | } | |
1046 | return nfreed; | |
1047 | } | |
1048 | EXPORT_SYMBOL(iwl_tx_queue_reclaim); | |
1049 | ||
1050 | ||
1051 | /** | |
1052 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | |
1053 | * | |
1054 | * When FW advances 'R' index, all entries between old and new 'R' index | |
1055 | * need to be reclaimed. As a result, some free space forms. If there is
1056 | * enough free space (> low mark), wake the stack that feeds us. | |
1057 | */ | |
499b1883 TW |
1058 | static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, |
1059 | int idx, int cmd_idx) | |
17b88929 TW |
1060 | { |
1061 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | |
1062 | struct iwl_queue *q = &txq->q; | |
1063 | int nfreed = 0; | |
1064 | ||
499b1883 | 1065 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { |
15b1687c | 1066 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " |
17b88929 | 1067 | "is out of range [0-%d] %d %d.\n", txq_id, |
499b1883 | 1068 | idx, q->n_bd, q->write_ptr, q->read_ptr); |
17b88929 TW |
1069 | return; |
1070 | } | |
1071 | ||
499b1883 TW |
1072 | pci_unmap_single(priv->pci_dev, |
1073 | pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping), | |
1074 | pci_unmap_len(&txq->cmd[cmd_idx]->meta, len), | |
96891cee | 1075 | PCI_DMA_BIDIRECTIONAL); |
499b1883 TW |
1076 | |
1077 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | |
1078 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | |
17b88929 | 1079 | |
499b1883 | 1080 | if (nfreed++ > 0) { |
15b1687c | 1081 | IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, |
17b88929 TW |
1082 | q->write_ptr, q->read_ptr); |
1083 | queue_work(priv->workqueue, &priv->restart); | |
1084 | } | |
da99c4b6 | 1085 | |
17b88929 TW |
1086 | } |
1087 | } | |
1088 | ||
1089 | /** | |
1090 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | |
1091 | * @rxb: Rx buffer to reclaim | |
1092 | * | |
1093 | * If an Rx buffer has an async callback associated with it the callback | |
1094 | * will be executed. The attached skb (if present) will only be freed | |
1095 | * if the callback returns 1 | |
1096 | */ | |
1097 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |
1098 | { | |
1099 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
1100 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | |
1101 | int txq_id = SEQ_TO_QUEUE(sequence); | |
1102 | int index = SEQ_TO_INDEX(sequence); | |
17b88929 | 1103 | int cmd_index; |
9734cb23 | 1104 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
17b88929 TW |
1105 | struct iwl_cmd *cmd; |
1106 | ||
1107 | /* If a Tx command is being handled and it isn't in the actual | |
1108 | * command queue then a command routing bug has been introduced
1109 | * in the queue management code. */ | |
55d6a3cd | 1110 | if (WARN(txq_id != IWL_CMD_QUEUE_NUM, |
01ef9323 WT |
1111 | "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n", |
1112 | txq_id, sequence, | |
1113 | priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr, | |
1114 | priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) { | |
1115 | iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32); | |
55d6a3cd | 1116 | return; |
01ef9323 | 1117 | } |
17b88929 TW |
1118 | |
1119 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | |
da99c4b6 | 1120 | cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; |
17b88929 TW |
1121 | |
1122 | /* Input error checking is done when commands are added to queue. */ | |
1123 | if (cmd->meta.flags & CMD_WANT_SKB) { | |
1124 | cmd->meta.source->u.skb = rxb->skb; | |
1125 | rxb->skb = NULL; | |
1126 | } else if (cmd->meta.u.callback && | |
1127 | !cmd->meta.u.callback(priv, cmd, rxb->skb)) | |
1128 | rxb->skb = NULL; | |
1129 | ||
499b1883 | 1130 | iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); |
17b88929 TW |
1131 | |
1132 | if (!(cmd->meta.flags & CMD_ASYNC)) { | |
1133 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | |
1134 | wake_up_interruptible(&priv->wait_command_queue); | |
1135 | } | |
1136 | } | |
1137 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | |
1138 | ||
30e553e3 TW |
1139 | /* |
1140 | * Find first available (lowest unused) Tx Queue, mark it "active". | |
1141 | * Called only when finding queue for aggregation. | |
1142 | * Should never return anything < 7, because they should already | |
1143 | * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). | |
1144 | */ | |
1145 | static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) | |
1146 | { | |
1147 | int txq_id; | |
1148 | ||
1149 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | |
1150 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | |
1151 | return txq_id; | |
1152 | return -1; | |
1153 | } | |
1154 | ||
1155 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | |
1156 | { | |
1157 | int sta_id; | |
1158 | int tx_fifo; | |
1159 | int txq_id; | |
1160 | int ret; | |
1161 | unsigned long flags; | |
1162 | struct iwl_tid_data *tid_data; | |
30e553e3 TW |
1163 | |
1164 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | |
1165 | tx_fifo = default_tid_to_tx_fifo[tid]; | |
1166 | else | |
1167 | return -EINVAL; | |
1168 | ||
39aadf8c | 1169 | IWL_WARN(priv, "%s on ra = %pM tid = %d\n", |
e174961c | 1170 | __func__, ra, tid); |
30e553e3 TW |
1171 | |
1172 | sta_id = iwl_find_station(priv, ra); | |
3eb92969 WYG |
1173 | if (sta_id == IWL_INVALID_STATION) { |
1174 | IWL_ERR(priv, "Start AGG on invalid station\n"); | |
30e553e3 | 1175 | return -ENXIO; |
3eb92969 | 1176 | } |
30e553e3 TW |
1177 | |
1178 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | |
15b1687c | 1179 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); |
30e553e3 TW |
1180 | return -ENXIO; |
1181 | } | |
1182 | ||
1183 | txq_id = iwl_txq_ctx_activate_free(priv); | |
3eb92969 WYG |
1184 | if (txq_id == -1) { |
1185 | IWL_ERR(priv, "No free aggregation queue available\n"); | |
30e553e3 | 1186 | return -ENXIO; |
3eb92969 | 1187 | } |
30e553e3 TW |
1188 | |
1189 | spin_lock_irqsave(&priv->sta_lock, flags); | |
1190 | tid_data = &priv->stations[sta_id].tid[tid]; | |
1191 | *ssn = SEQ_TO_SN(tid_data->seq_number); | |
1192 | tid_data->agg.txq_id = txq_id; | |
1193 | spin_unlock_irqrestore(&priv->sta_lock, flags); | |
1194 | ||
1195 | ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | |
1196 | sta_id, tid, *ssn); | |
1197 | if (ret) | |
1198 | return ret; | |
1199 | ||
1200 | if (tid_data->tfds_in_queue == 0) { | |
3eb92969 | 1201 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); |
30e553e3 TW |
1202 | tid_data->agg.state = IWL_AGG_ON; |
1203 | ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid); | |
1204 | } else { | |
e1623446 | 1205 | IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", |
30e553e3 TW |
1206 | tid_data->tfds_in_queue); |
1207 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | |
1208 | } | |
1209 | return ret; | |
1210 | } | |
1211 | EXPORT_SYMBOL(iwl_tx_agg_start); | |
1212 | ||
1213 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid) | |
1214 | { | |
1215 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | |
1216 | struct iwl_tid_data *tid_data; | |
1217 | int ret, write_ptr, read_ptr; | |
1218 | unsigned long flags; | |
30e553e3 TW |
1219 | |
1220 | if (!ra) { | |
15b1687c | 1221 | IWL_ERR(priv, "ra = NULL\n"); |
30e553e3 TW |
1222 | return -EINVAL; |
1223 | } | |
1224 | ||
1225 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | |
1226 | tx_fifo_id = default_tid_to_tx_fifo[tid]; | |
1227 | else | |
1228 | return -EINVAL; | |
1229 | ||
1230 | sta_id = iwl_find_station(priv, ra); | |
1231 | ||
a2f1cbeb WYG |
1232 | if (sta_id == IWL_INVALID_STATION) { |
1233 | IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); | |
30e553e3 | 1234 | return -ENXIO; |
a2f1cbeb | 1235 | } |
30e553e3 TW |
1236 | |
1237 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | |
39aadf8c | 1238 | IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); |
30e553e3 TW |
1239 | |
1240 | tid_data = &priv->stations[sta_id].tid[tid]; | |
1241 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | |
1242 | txq_id = tid_data->agg.txq_id; | |
1243 | write_ptr = priv->txq[txq_id].q.write_ptr; | |
1244 | read_ptr = priv->txq[txq_id].q.read_ptr; | |
1245 | ||
1246 | /* The queue is not empty */ | |
1247 | if (write_ptr != read_ptr) { | |
e1623446 | 1248 | IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); |
30e553e3 TW |
1249 | priv->stations[sta_id].tid[tid].agg.state = |
1250 | IWL_EMPTYING_HW_QUEUE_DELBA; | |
1251 | return 0; | |
1252 | } | |
1253 | ||
e1623446 | 1254 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); |
30e553e3 TW |
1255 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; |
1256 | ||
1257 | spin_lock_irqsave(&priv->lock, flags); | |
1258 | ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | |
1259 | tx_fifo_id); | |
1260 | spin_unlock_irqrestore(&priv->lock, flags); | |
1261 | ||
1262 | if (ret) | |
1263 | return ret; | |
1264 | ||
1265 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid); | |
1266 | ||
1267 | return 0; | |
1268 | } | |
1269 | EXPORT_SYMBOL(iwl_tx_agg_stop); | |
1270 | ||
1271 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) | |
1272 | { | |
1273 | struct iwl_queue *q = &priv->txq[txq_id].q; | |
1274 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | |
1275 | struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | |
1276 | ||
1277 | switch (priv->stations[sta_id].tid[tid].agg.state) { | |
1278 | case IWL_EMPTYING_HW_QUEUE_DELBA: | |
1279 | /* We are reclaiming the last packet of the */ | |
1280 | /* aggregated HW queue */ | |
3fd07a1e TW |
1281 | if ((txq_id == tid_data->agg.txq_id) && |
1282 | (q->read_ptr == q->write_ptr)) { | |
30e553e3 TW |
1283 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); |
1284 | int tx_fifo = default_tid_to_tx_fifo[tid]; | |
e1623446 | 1285 | IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); |
30e553e3 TW |
1286 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, |
1287 | ssn, tx_fifo); | |
1288 | tid_data->agg.state = IWL_AGG_OFF; | |
1289 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid); | |
1290 | } | |
1291 | break; | |
1292 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | |
1293 | /* We are reclaiming the last packet of the queue */ | |
1294 | if (tid_data->tfds_in_queue == 0) { | |
e1623446 | 1295 | IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); |
30e553e3 TW |
1296 | tid_data->agg.state = IWL_AGG_ON; |
1297 | ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); | |
1298 | } | |
1299 | break; | |
1300 | } | |
1301 | return 0; | |
1302 | } | |
1303 | EXPORT_SYMBOL(iwl_txq_check_empty); | |
30e553e3 | 1304 | |
653fa4a0 EG |
1305 | /** |
1306 | * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack | |
1307 | * | |
1308 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | |
1309 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | |
1310 | */ | |
1311 | static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv, | |
1312 | struct iwl_ht_agg *agg, | |
1313 | struct iwl_compressed_ba_resp *ba_resp) | |
1314 | ||
1315 | { | |
1316 | int i, sh, ack; | |
1317 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | |
1318 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | |
1319 | u64 bitmap; | |
1320 | int successes = 0; | |
1321 | struct ieee80211_tx_info *info; | |
1322 | ||
1323 | if (unlikely(!agg->wait_for_ba)) { | |
15b1687c | 1324 | IWL_ERR(priv, "Received BA when not expected\n"); |
653fa4a0 EG |
1325 | return -EINVAL; |
1326 | } | |
1327 | ||
1328 | /* Mark that the expected block-ack response arrived */ | |
1329 | agg->wait_for_ba = 0; | |
e1623446 | 1330 | IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); |
653fa4a0 EG |
1331 | |
1332 | /* Calculate shift to align block-ack bits with our Tx window bits */ | |
3fd07a1e | 1333 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); |
653fa4a0 EG |
1334 | if (sh < 0) /* btw, something is wrong with indices */
1335 | sh += 0x100; | |
1336 | ||
1337 | /* don't use 64-bit values for now */ | |
1338 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | |
1339 | ||
1340 | if (agg->frame_count > (64 - sh)) { | |
e1623446 | 1341 | IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); |
653fa4a0 EG |
1342 | return -1; |
1343 | } | |
1344 | ||
1345 | /* check for success or failure according to the | |
1346 | * transmitted bitmap and block-ack bitmap */ | |
1347 | bitmap &= agg->bitmap; | |
1348 | ||
1349 | /* For each frame attempted in aggregation, | |
1350 | * update driver's record of tx frame's status. */ | |
1351 | for (i = 0; i < agg->frame_count ; i++) { | |
4aa41f12 | 1352 | ack = bitmap & (1ULL << i); |
653fa4a0 | 1353 | successes += !!ack; |
e1623446 | 1354 | IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", |
c3056065 | 1355 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, |
653fa4a0 EG |
1356 | agg->start_idx + i); |
1357 | } | |
1358 | ||
1359 | info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | |
1360 | memset(&info->status, 0, sizeof(info->status)); | |
1361 | info->flags = IEEE80211_TX_STAT_ACK; | |
1362 | info->flags |= IEEE80211_TX_STAT_AMPDU; | |
1363 | info->status.ampdu_ack_map = successes; | |
1364 | info->status.ampdu_ack_len = agg->frame_count; | |
1365 | iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | |
1366 | ||
e1623446 | 1367 | IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); |
653fa4a0 EG |
1368 | |
1369 | return 0; | |
1370 | } | |
1371 | ||
1372 | /** | |
1373 | * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | |
1374 | * | |
1375 | * Handles block-acknowledge notification from device, which reports success | |
1376 | * of frames sent via aggregation. | |
1377 | */ | |
1378 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | |
1379 | struct iwl_rx_mem_buffer *rxb) | |
1380 | { | |
1381 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
1382 | struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | |
653fa4a0 EG |
1383 | struct iwl_tx_queue *txq = NULL; |
1384 | struct iwl_ht_agg *agg; | |
3fd07a1e TW |
1385 | int index; |
1386 | int sta_id; | |
1387 | int tid; | |
653fa4a0 EG |
1388 | |
1389 | /* "flow" corresponds to Tx queue */ | |
1390 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | |
1391 | ||
1392 | /* "ssn" is start of block-ack Tx window, corresponds to index | |
1393 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | |
1394 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | |
1395 | ||
1396 | if (scd_flow >= priv->hw_params.max_txq_num) { | |
15b1687c WT |
1397 | IWL_ERR(priv, |
1398 | "BUG_ON scd_flow is bigger than number of queues\n"); | |
653fa4a0 EG |
1399 | return; |
1400 | } | |
1401 | ||
1402 | txq = &priv->txq[scd_flow]; | |
3fd07a1e TW |
1403 | sta_id = ba_resp->sta_id; |
1404 | tid = ba_resp->tid; | |
1405 | agg = &priv->stations[sta_id].tid[tid].agg; | |
653fa4a0 EG |
1406 | |
1407 | /* Find index just before block-ack window */ | |
1408 | index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | |
1409 | ||
1410 | /* TODO: Need to get this copy more safely - now good for debug */ | |
1411 | ||
e1623446 | 1412 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " |
653fa4a0 EG |
1413 | "sta_id = %d\n", |
1414 | agg->wait_for_ba, | |
e174961c | 1415 | (u8 *) &ba_resp->sta_addr_lo32, |
653fa4a0 | 1416 | ba_resp->sta_id); |
e1623446 | 1417 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " |
653fa4a0 EG |
1418 | "%d, scd_ssn = %d\n", |
1419 | ba_resp->tid, | |
1420 | ba_resp->seq_ctl, | |
1421 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | |
1422 | ba_resp->scd_flow, | |
1423 | ba_resp->scd_ssn); | |
e1623446 | 1424 | IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n", |
653fa4a0 EG |
1425 | agg->start_idx, |
1426 | (unsigned long long)agg->bitmap); | |
1427 | ||
1428 | /* Update driver's record of ACK vs. not for each frame in window */ | |
1429 | iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); | |
1430 | ||
1431 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | |
1432 | * block-ack window (we assume that they've been successfully | |
1433 | * transmitted ... if not, it's too late anyway). */ | |
1434 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | |
1435 | /* calculate mac80211 ampdu sw queue to wake */ | |
653fa4a0 | 1436 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); |
3fd07a1e TW |
1437 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; |
1438 | ||
1439 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | |
1440 | priv->mac80211_registered && | |
1441 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) | |
e4e72fb4 | 1442 | iwl_wake_queue(priv, txq->swq_id); |
3fd07a1e TW |
1443 | |
1444 | iwl_txq_check_empty(priv, sta_id, tid, scd_flow); | |
653fa4a0 EG |
1445 | } |
1446 | } | |
1447 | EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); | |
1448 | ||
994d31f7 | 1449 | #ifdef CONFIG_IWLWIFI_DEBUG |
a332f8d6 TW |
1450 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x |
1451 | ||
1452 | const char *iwl_get_tx_fail_reason(u32 status) | |
1453 | { | |
1454 | switch (status & TX_STATUS_MSK) { | |
1455 | case TX_STATUS_SUCCESS: | |
1456 | return "SUCCESS"; | |
1457 | TX_STATUS_ENTRY(SHORT_LIMIT); | |
1458 | TX_STATUS_ENTRY(LONG_LIMIT); | |
1459 | TX_STATUS_ENTRY(FIFO_UNDERRUN); | |
1460 | TX_STATUS_ENTRY(MGMNT_ABORT); | |
1461 | TX_STATUS_ENTRY(NEXT_FRAG); | |
1462 | TX_STATUS_ENTRY(LIFE_EXPIRE); | |
1463 | TX_STATUS_ENTRY(DEST_PS); | |
1464 | TX_STATUS_ENTRY(ABORTED); | |
1465 | TX_STATUS_ENTRY(BT_RETRY); | |
1466 | TX_STATUS_ENTRY(STA_INVALID); | |
1467 | TX_STATUS_ENTRY(FRAG_DROPPED); | |
1468 | TX_STATUS_ENTRY(TID_DISABLE); | |
1469 | TX_STATUS_ENTRY(FRAME_FLUSHED); | |
1470 | TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | |
1471 | TX_STATUS_ENTRY(TX_LOCKED); | |
1472 | TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | |
1473 | } | |
1474 | ||
1475 | return "UNKNOWN"; | |
1476 | } | |
1477 | EXPORT_SYMBOL(iwl_get_tx_fail_reason); | |
1478 | #endif /* CONFIG_IWLWIFI_DEBUG */ |