iwlwifi: remove unused op-code in PHY Calibration command
drivers/net/wireless/iwlwifi/iwl-tx.c
1/******************************************************************************
2 *
1f447808 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
759ef89f 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
fd4abac5 30#include <linux/etherdevice.h>
d43c36dc 31#include <linux/sched.h>
32#include <net/mac80211.h>
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
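/*
 * Default mapping from 802.11e TID to Tx FIFO: TIDs 0-7 select one of the
 * four EDCA FIFOs (AC0-AC3), TIDs 8-15 are unused here (IWL_TX_FIFO_NONE),
 * and the final entry falls back to AC3.
 */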
40static const u16 default_tid_to_tx_fifo[] = {
41 IWL_TX_FIFO_AC1,
42 IWL_TX_FIFO_AC0,
43 IWL_TX_FIFO_AC0,
44 IWL_TX_FIFO_AC1,
45 IWL_TX_FIFO_AC2,
46 IWL_TX_FIFO_AC2,
47 IWL_TX_FIFO_AC3,
48 IWL_TX_FIFO_AC3,
49 IWL_TX_FIFO_NONE,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_NONE,
57 IWL_TX_FIFO_AC3
58};
59
60static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
61 struct iwl_dma_ptr *ptr, size_t size)
62{
63 ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
64 if (!ptr->addr)
65 return -ENOMEM;
66 ptr->size = size;
67 return 0;
68}
69
70static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
71 struct iwl_dma_ptr *ptr)
72{
73 if (unlikely(!ptr->addr))
74 return;
75
76 pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
77 memset(ptr, 0, sizeof(*ptr));
78}
79
80/**
81 * iwl_txq_update_write_ptr - Send new write index to hardware
82 */
83int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
84{
85 u32 reg = 0;
86 int ret = 0;
87 int txq_id = txq->q.id;
88
89 if (txq->need_update == 0)
90 return ret;
91
92 /* if we're trying to save power */
93 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
94 /* wake up nic if it's powered down ...
95 * uCode will wake up, and interrupt us again, so next
96 * time we'll skip this part. */
97 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
98
99 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
100 IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
101 txq_id, reg);
102 iwl_set_bit(priv, CSR_GP_CNTRL,
103 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
104 return ret;
105 }
106
107 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
108 txq->q.write_ptr | (txq_id << 8));
109
110 /* else not in power-save mode, uCode will never sleep when we're
111 * trying to tx (during RFKILL, we're not trying to tx). */
112 } else
113 iwl_write32(priv, HBUS_TARG_WRPTR,
114 txq->q.write_ptr | (txq_id << 8));
115
116 txq->need_update = 0;
117
118 return ret;
119}
120EXPORT_SYMBOL(iwl_txq_update_write_ptr);
121
122
123/**
124 * iwl_tx_queue_free - Deallocate DMA queue.
125 * @txq: Transmit queue to deallocate.
126 *
127 * Empty queue by removing and destroying all BD's.
128 * Free all buffers.
129 * 0-fill, but do not free "txq" descriptor structure.
130 */
a8e74e27 131void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
1053d35f 132{
da99c4b6 133 struct iwl_tx_queue *txq = &priv->txq[txq_id];
443cfd45 134 struct iwl_queue *q = &txq->q;
1053d35f 135 struct pci_dev *dev = priv->pci_dev;
71c55d90 136 int i;
137
138 if (q->n_bd == 0)
139 return;
140
141 /* first, empty all BD's */
142 for (; q->write_ptr != q->read_ptr;
143 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
7aaa1d79 144 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1053d35f 145
1053d35f 146 /* De-alloc array of command/tx buffers */
961ba60a 147 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
da99c4b6 148 kfree(txq->cmd[i]);
149
150 /* De-alloc circular buffer of TFDs */
151 if (txq->q.n_bd)
a8e74e27 152 pci_free_consistent(dev, priv->hw_params.tfd_size *
499b1883 153 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
154
155 /* De-alloc array of per-TFD driver data */
156 kfree(txq->txb);
157 txq->txb = NULL;
158
159 /* deallocate arrays */
160 kfree(txq->cmd);
161 kfree(txq->meta);
162 txq->cmd = NULL;
163 txq->meta = NULL;
164
165 /* 0-fill queue descriptor structure */
166 memset(txq, 0, sizeof(*txq));
167}
a8e74e27 168EXPORT_SYMBOL(iwl_tx_queue_free);
169
170/**
171 * iwl_cmd_queue_free - Deallocate DMA queue.
172 * @txq: Transmit queue to deallocate.
173 *
174 * Empty queue by removing and destroying all BD's.
175 * Free all buffers.
176 * 0-fill, but do not free "txq" descriptor structure.
177 */
3e5d238f 178void iwl_cmd_queue_free(struct iwl_priv *priv)
179{
180 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
181 struct iwl_queue *q = &txq->q;
182 struct pci_dev *dev = priv->pci_dev;
71c55d90 183 int i;
184
185 if (q->n_bd == 0)
186 return;
187
188 /* De-alloc array of command/tx buffers */
189 for (i = 0; i <= TFD_CMD_SLOTS; i++)
190 kfree(txq->cmd[i]);
191
192 /* De-alloc circular buffer of TFDs */
193 if (txq->q.n_bd)
3e5d238f 194 pci_free_consistent(dev, priv->hw_params.tfd_size *
499b1883 195 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
961ba60a 196
197 /* deallocate arrays */
198 kfree(txq->cmd);
199 kfree(txq->meta);
200 txq->cmd = NULL;
201 txq->meta = NULL;
202
203 /* 0-fill queue descriptor structure */
204 memset(txq, 0, sizeof(*txq));
205}
206EXPORT_SYMBOL(iwl_cmd_queue_free);
207
208/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
209 * DMA services
210 *
211 * Theory of operation
212 *
213 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
214 * of buffer descriptors, each of which points to one or more data buffers for
215 * the device to read from or fill. Driver and device exchange status of each
216 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
217 * entries in each circular buffer, to protect against confusing empty and full
218 * queue states.
219 *
220 * The device reads or writes the data in the queues via the device's several
221 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
222 *
 223 * For Tx queues, there are low-mark and high-mark limits. If, after queuing
 224 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
 225 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
 226 * the Tx queue is resumed.
227 *
228 * See more detailed info in iwl-4965-hw.h.
229 ***************************************************/
230
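/*
 * Example of the arithmetic in iwl_queue_space() below: with n_window = 64
 * and read_ptr == write_ptr (an empty ring), s = 0, then s += n_window and
 * s -= 2, so 62 entries are reported free -- the two reserved slots noted
 * above keep a completely full ring distinguishable from an empty one.
 */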
231int iwl_queue_space(const struct iwl_queue *q)
232{
233 int s = q->read_ptr - q->write_ptr;
234
235 if (q->read_ptr > q->write_ptr)
236 s -= q->n_bd;
237
238 if (s <= 0)
239 s += q->n_window;
240 /* keep some reserve to not confuse empty and full situations */
241 s -= 2;
242 if (s < 0)
243 s = 0;
244 return s;
245}
246EXPORT_SYMBOL(iwl_queue_space);
247
248
249/**
250 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
251 */
443cfd45 252static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
253 int count, int slots_num, u32 id)
254{
255 q->n_bd = count;
256 q->n_window = slots_num;
257 q->id = id;
258
259 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
260 * and iwl_queue_dec_wrap are broken. */
261 BUG_ON(!is_power_of_2(count));
262
263 /* slots_num must be power-of-two size, otherwise
264 * get_cmd_index is broken. */
265 BUG_ON(!is_power_of_2(slots_num));
266
267 q->low_mark = q->n_window / 4;
268 if (q->low_mark < 4)
269 q->low_mark = 4;
270
271 q->high_mark = q->n_window / 8;
272 if (q->high_mark < 2)
273 q->high_mark = 2;
274
275 q->write_ptr = q->read_ptr = 0;
276
277 return 0;
278}
279
280/**
281 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
282 */
283static int iwl_tx_queue_alloc(struct iwl_priv *priv,
16466903 284 struct iwl_tx_queue *txq, u32 id)
285{
286 struct pci_dev *dev = priv->pci_dev;
3978e5bc 287 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
288
289 /* Driver private data, only for Tx (not command) queues,
290 * not shared with device. */
291 if (id != IWL_CMD_QUEUE_NUM) {
292 txq->txb = kmalloc(sizeof(txq->txb[0]) *
293 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
294 if (!txq->txb) {
15b1687c 295 IWL_ERR(priv, "kmalloc for auxiliary BD "
296 "structures failed\n");
297 goto error;
298 }
3978e5bc 299 } else {
1053d35f 300 txq->txb = NULL;
3978e5bc 301 }
302
303 /* Circular buffer of transmit frame descriptors (TFDs),
304 * shared with device */
3978e5bc 305 txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
1053d35f 306
499b1883 307 if (!txq->tfds) {
3978e5bc 308 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
309 goto error;
310 }
311 txq->q.id = id;
312
313 return 0;
314
315 error:
316 kfree(txq->txb);
317 txq->txb = NULL;
318
319 return -ENOMEM;
320}
321
322/**
323 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
324 */
325int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
326 int slots_num, u32 txq_id)
1053d35f 327{
da99c4b6 328 int i, len;
73b7d742 329 int ret;
c2acea8e 330 int actual_slots = slots_num;
331
332 /*
333 * Alloc buffer array for commands (Tx or other types of commands).
334 * For the command queue (#4), allocate command space + one big
335 * command for scan, since scan command is very huge; the system will
336 * not have two scans at the same time, so only one is needed.
337 * For normal Tx queues (all other queues), no super-size command
338 * space is needed.
339 */
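	/*
	 * In other words: for the command queue actual_slots becomes
	 * TFD_CMD_SLOTS + 1, and the loop below sizes that one extra (last)
	 * slot as sizeof(struct iwl_device_cmd) + IWL_MAX_SCAN_SIZE.
	 */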
340 if (txq_id == IWL_CMD_QUEUE_NUM)
341 actual_slots++;
342
343 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
344 GFP_KERNEL);
345 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
346 GFP_KERNEL);
347
348 if (!txq->meta || !txq->cmd)
349 goto out_free_arrays;
350
351 len = sizeof(struct iwl_device_cmd);
352 for (i = 0; i < actual_slots; i++) {
353 /* only happens for cmd queue */
354 if (i == slots_num)
355 len += IWL_MAX_SCAN_SIZE;
da99c4b6 356
49898852 357 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
da99c4b6 358 if (!txq->cmd[i])
73b7d742 359 goto err;
da99c4b6 360 }
361
362 /* Alloc driver data array and TFD circular buffer */
363 ret = iwl_tx_queue_alloc(priv, txq, txq_id);
364 if (ret)
365 goto err;
1053d35f 366
367 txq->need_update = 0;
368
369 /*
370 * Aggregation TX queues will get their ID when aggregation begins;
 371 * they overwrite the setting done here. The command FIFO doesn't
 372 * need an swq_id, so don't set one in order to catch errors; all
 373 * others can be set up to the identity mapping.
374 */
375 if (txq_id != IWL_CMD_QUEUE_NUM)
45af8195
JB
376 txq->swq_id = txq_id;
377
378 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
379 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
380 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
381
382 /* Initialize queue's high/low-water marks, and head/tail indexes */
383 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
384
385 /* Tell device where to find queue */
a8e74e27 386 priv->cfg->ops->lib->txq_init(priv, txq);
387
388 return 0;
73b7d742 389err:
c2acea8e 390 for (i = 0; i < actual_slots; i++)
73b7d742 391 kfree(txq->cmd[i]);
392out_free_arrays:
393 kfree(txq->meta);
394 kfree(txq->cmd);
73b7d742 395
73b7d742 396 return -ENOMEM;
1053d35f 397}
398EXPORT_SYMBOL(iwl_tx_queue_init);
399
400/**
401 * iwl_hw_txq_ctx_free - Free TXQ Context
402 *
403 * Destroy all TX DMA queues and structures
404 */
405void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
406{
407 int txq_id;
408
409 /* Tx queues */
77ca7d9e 410 if (priv->txq) {
411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
412 txq_id++)
413 if (txq_id == IWL_CMD_QUEUE_NUM)
414 iwl_cmd_queue_free(priv);
415 else
416 iwl_tx_queue_free(priv, txq_id);
77ca7d9e 417 }
418 iwl_free_dma_ptr(priv, &priv->kw);
419
420 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
421
422 /* free tx queue structure */
423 iwl_free_txq_mem(priv);
424}
425EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
426
427/**
428 * iwl_txq_ctx_reset - Reset TX queue context
a96a27f9 429 * Destroys all DMA structures and initializes them again
430 *
431 * @param priv
432 * @return error code
433 */
434int iwl_txq_ctx_reset(struct iwl_priv *priv)
435{
436 int ret = 0;
437 int txq_id, slots_num;
da1bc453 438 unsigned long flags;
1053d35f 439
440 /* Free all tx/cmd queues and keep-warm buffer */
441 iwl_hw_txq_ctx_free(priv);
442
443 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
444 priv->hw_params.scd_bc_tbls_size);
445 if (ret) {
15b1687c 446 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
447 goto error_bc_tbls;
448 }
1053d35f 449 /* Alloc keep-warm buffer */
4ddbb7d0 450 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
1053d35f 451 if (ret) {
15b1687c 452 IWL_ERR(priv, "Keep Warm allocation failed\n");
453 goto error_kw;
454 }
455
456 /* allocate tx queue structure */
457 ret = iwl_alloc_txq_mem(priv);
458 if (ret)
459 goto error;
460
da1bc453 461 spin_lock_irqsave(&priv->lock, flags);
462
463 /* Turn off all Tx DMA fifos */
464 priv->cfg->ops->lib->txq_set_sched(priv, 0);
465
466 /* Tell NIC where to find the "keep warm" buffer */
467 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
468
469 spin_unlock_irqrestore(&priv->lock, flags);
470
da1bc453 471 /* Alloc and init all Tx queues, including the command queue (#4) */
472 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
473 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
474 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
475 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
476 txq_id);
477 if (ret) {
15b1687c 478 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
479 goto error;
480 }
481 }
482
483 return ret;
484
485 error:
486 iwl_hw_txq_ctx_free(priv);
4ddbb7d0 487 iwl_free_dma_ptr(priv, &priv->kw);
1053d35f 488 error_kw:
489 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
490 error_bc_tbls:
491 return ret;
492}
a33c2f47 493
494/**
495 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
496 */
497void iwl_txq_ctx_stop(struct iwl_priv *priv)
498{
f3f911d1 499 int ch;
500 unsigned long flags;
501
502 /* Turn off all Tx DMA fifos */
503 spin_lock_irqsave(&priv->lock, flags);
504
505 priv->cfg->ops->lib->txq_set_sched(priv, 0);
506
507 /* Stop each Tx DMA channel, and wait for it to be idle */
508 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
509 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
da1bc453 510 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
f3f911d1 511 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
f056658b 512 1000);
da1bc453 513 }
514 spin_unlock_irqrestore(&priv->lock, flags);
515
516 /* Deallocate memory for all Tx queues */
517 iwl_hw_txq_ctx_free(priv);
518}
519EXPORT_SYMBOL(iwl_txq_ctx_stop);
520
521/*
522 * handle build REPLY_TX command notification.
523 */
524static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
525 struct iwl_tx_cmd *tx_cmd,
e039fa4a 526 struct ieee80211_tx_info *info,
fd4abac5 527 struct ieee80211_hdr *hdr,
0e7690f1 528 u8 std_id)
fd4abac5 529{
fd7c8a40 530 __le16 fc = hdr->frame_control;
fd4abac5
TW
531 __le32 tx_flags = tx_cmd->tx_flags;
532
533 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
e039fa4a 534 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
fd4abac5 535 tx_flags |= TX_CMD_FLG_ACK_MSK;
fd7c8a40 536 if (ieee80211_is_mgmt(fc))
fd4abac5 537 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
fd7c8a40 538 if (ieee80211_is_probe_resp(fc) &&
fd4abac5
TW
539 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
540 tx_flags |= TX_CMD_FLG_TSF_MSK;
541 } else {
542 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
543 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
544 }
545
fd7c8a40 546 if (ieee80211_is_back_req(fc))
fd4abac5
TW
547 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
548
549
550 tx_cmd->sta_id = std_id;
8b7b1e05 551 if (ieee80211_has_morefrags(fc))
fd4abac5
TW
552 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
553
fd7c8a40
HH
554 if (ieee80211_is_data_qos(fc)) {
555 u8 *qc = ieee80211_get_qos_ctl(hdr);
fd4abac5
TW
556 tx_cmd->tid_tspec = qc[0] & 0xf;
557 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
558 } else {
559 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
560 }
561
a326a5d0 562 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
fd4abac5
TW
563
564 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
565 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
566
567 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
fd7c8a40
HH
568 if (ieee80211_is_mgmt(fc)) {
569 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
fd4abac5
TW
570 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
571 else
572 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
573 } else {
574 tx_cmd->timeout.pm_frame_timeout = 0;
575 }
576
577 tx_cmd->driver_txop = 0;
578 tx_cmd->tx_flags = tx_flags;
579 tx_cmd->next_frame_len = 0;
580}
581
582#define RTS_HCCA_RETRY_LIMIT 3
583#define RTS_DFAULT_RETRY_LIMIT 60
584
585static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
586 struct iwl_tx_cmd *tx_cmd,
e039fa4a 587 struct ieee80211_tx_info *info,
b58ef214 588 __le16 fc, int is_hcca)
fd4abac5 589{
b58ef214 590 u32 rate_flags;
76eff18b 591 int rate_idx;
b58ef214
DH
592 u8 rts_retry_limit;
593 u8 data_retry_limit;
fd4abac5 594 u8 rate_plcp;
2e92e6f2 595
b58ef214 596 /* Set retry limit on DATA packets and Probe Responses*/
1f0436f4 597 if (ieee80211_is_probe_resp(fc))
b58ef214
DH
598 data_retry_limit = 3;
599 else
600 data_retry_limit = IWL_DEFAULT_TX_RETRY;
601 tx_cmd->data_retry_limit = data_retry_limit;
fd4abac5 602
b58ef214
DH
603 /* Set retry limit on RTS packets */
604 rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
605 RTS_DFAULT_RETRY_LIMIT;
606 if (data_retry_limit < rts_retry_limit)
607 rts_retry_limit = data_retry_limit;
608 tx_cmd->rts_retry_limit = rts_retry_limit;
fd4abac5 609
b58ef214
DH
610 /* DATA packets will use the uCode station table for rate/antenna
611 * selection */
fd4abac5
TW
612 if (ieee80211_is_data(fc)) {
613 tx_cmd->initial_rate_index = 0;
614 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
b58ef214
DH
615 return;
616 }
617
618 /**
619 * If the current TX rate stored in mac80211 has the MCS bit set, it's
620 * not really a TX rate. Thus, we use the lowest supported rate for
621 * this band. Also use the lowest supported rate if the stored rate
622 * index is invalid.
623 */
624 rate_idx = info->control.rates[0].idx;
625 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
626 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
627 rate_idx = rate_lowest_index(&priv->bands[info->band],
628 info->control.sta);
629 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
630 if (info->band == IEEE80211_BAND_5GHZ)
631 rate_idx += IWL_FIRST_OFDM_RATE;
632 /* Get PLCP rate for tx_cmd->rate_n_flags */
633 rate_plcp = iwl_rates[rate_idx].plcp;
634 /* Zero out flags for this packet */
635 rate_flags = 0;
fd4abac5 636
b58ef214
DH
637 /* Set CCK flag as needed */
638 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
639 rate_flags |= RATE_MCS_CCK_MSK;
640
641 /* Set up RTS and CTS flags for certain packets */
642 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
643 case cpu_to_le16(IEEE80211_STYPE_AUTH):
644 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
645 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
646 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
647 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
648 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
649 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
650 }
651 break;
652 default:
653 break;
fd4abac5
TW
654 }
655
b58ef214
DH
656 /* Set up antennas */
657 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
658 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
659
660 /* Set the rate in the TX cmd */
e7d326ac 661 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
fd4abac5
TW
662}
663
664static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
e039fa4a 665 struct ieee80211_tx_info *info,
fd4abac5
TW
666 struct iwl_tx_cmd *tx_cmd,
667 struct sk_buff *skb_frag,
668 int sta_id)
669{
e039fa4a 670 struct ieee80211_key_conf *keyconf = info->control.hw_key;
fd4abac5 671
ccc038ab 672 switch (keyconf->alg) {
fd4abac5
TW
673 case ALG_CCMP:
674 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
ccc038ab 675 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
e039fa4a 676 if (info->flags & IEEE80211_TX_CTL_AMPDU)
fd4abac5 677 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
e1623446 678 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
fd4abac5
TW
679 break;
680
681 case ALG_TKIP:
682 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
ccc038ab 683 ieee80211_get_tkip_key(keyconf, skb_frag,
fd4abac5 684 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
e1623446 685 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
fd4abac5
TW
686 break;
687
688 case ALG_WEP:
fd4abac5 689 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
ccc038ab
EG
690 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
691
692 if (keyconf->keylen == WEP_KEY_LEN_128)
693 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
694
695 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
fd4abac5 696
e1623446 697 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
ccc038ab 698 "with key %d\n", keyconf->keyidx);
fd4abac5
TW
699 break;
700
701 default:
978785a3 702 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
fd4abac5
TW
703 break;
704 }
705}
706
fd4abac5
TW
707/*
708 * start REPLY_TX command process
709 */
e039fa4a 710int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
fd4abac5
TW
711{
712 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e039fa4a 713 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
6ab10ff8
JB
714 struct ieee80211_sta *sta = info->control.sta;
715 struct iwl_station_priv *sta_priv = NULL;
f3674227
TW
716 struct iwl_tx_queue *txq;
717 struct iwl_queue *q;
c2acea8e
JB
718 struct iwl_device_cmd *out_cmd;
719 struct iwl_cmd_meta *out_meta;
f3674227
TW
720 struct iwl_tx_cmd *tx_cmd;
721 int swq_id, txq_id;
fd4abac5
TW
722 dma_addr_t phys_addr;
723 dma_addr_t txcmd_phys;
724 dma_addr_t scratch_phys;
be1a71a1 725 u16 len, len_org, firstlen, secondlen;
fd4abac5 726 u16 seq_number = 0;
fd7c8a40 727 __le16 fc;
0e7690f1 728 u8 hdr_len;
f3674227 729 u8 sta_id;
fd4abac5
TW
730 u8 wait_write_ptr = 0;
731 u8 tid = 0;
732 u8 *qc = NULL;
733 unsigned long flags;
734 int ret;
735
736 spin_lock_irqsave(&priv->lock, flags);
737 if (iwl_is_rfkill(priv)) {
e1623446 738 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
fd4abac5
TW
739 goto drop_unlock;
740 }
741
fd7c8a40 742 fc = hdr->frame_control;
fd4abac5
TW
743
744#ifdef CONFIG_IWLWIFI_DEBUG
745 if (ieee80211_is_auth(fc))
e1623446 746 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
fd7c8a40 747 else if (ieee80211_is_assoc_req(fc))
e1623446 748 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
fd7c8a40 749 else if (ieee80211_is_reassoc_req(fc))
e1623446 750 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
fd4abac5
TW
751#endif
752
aa065263 753 /* drop all non-injected data frame if we are not associated */
fd7c8a40 754 if (ieee80211_is_data(fc) &&
aa065263 755 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
d10c4ec8 756 (!iwl_is_associated(priv) ||
05c914fe 757 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
d10c4ec8 758 !priv->assoc_station_added)) {
e1623446 759 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
fd4abac5
TW
760 goto drop_unlock;
761 }
762
7294ec95 763 hdr_len = ieee80211_hdrlen(fc);
fd4abac5
TW
764
765 /* Find (or create) index into station table for destination station */
aa065263
GS
766 if (info->flags & IEEE80211_TX_CTL_INJECTED)
767 sta_id = priv->hw_params.bcast_sta_id;
768 else
769 sta_id = iwl_get_sta_id(priv, hdr);
fd4abac5 770 if (sta_id == IWL_INVALID_STATION) {
e1623446 771 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
e174961c 772 hdr->addr1);
3995bd93 773 goto drop_unlock;
fd4abac5
TW
774 }
775
e1623446 776 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
fd4abac5 777
6ab10ff8
JB
778 if (sta)
779 sta_priv = (void *)sta->drv_priv;
780
781 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
782 sta_priv->asleep) {
783 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
784 /*
785 * This sends an asynchronous command to the device,
786 * but we can rely on it being processed before the
787 * next frame is processed -- and the next frame to
788 * this station is the one that will consume this
789 * counter.
790 * For now set the counter to just 1 since we do not
791 * support uAPSD yet.
792 */
793 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
794 }
795
45af8195 796 txq_id = skb_get_queue_mapping(skb);
fd7c8a40
HH
797 if (ieee80211_is_data_qos(fc)) {
798 qc = ieee80211_get_qos_ctl(hdr);
7294ec95 799 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
e6a6cf4c
RC
800 if (unlikely(tid >= MAX_TID_COUNT))
801 goto drop_unlock;
f3674227
TW
802 seq_number = priv->stations[sta_id].tid[tid].seq_number;
803 seq_number &= IEEE80211_SCTL_SEQ;
804 hdr->seq_ctrl = hdr->seq_ctrl &
c1b4aa3f 805 cpu_to_le16(IEEE80211_SCTL_FRAG);
f3674227 806 hdr->seq_ctrl |= cpu_to_le16(seq_number);
fd4abac5 807 seq_number += 0x10;
fd4abac5 808 /* aggregation is on for this <sta,tid> */
45d42700
WYG
809 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
810 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
fd4abac5 811 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
45d42700 812 }
fd4abac5
TW
813 }
814
fd4abac5 815 txq = &priv->txq[txq_id];
45af8195 816 swq_id = txq->swq_id;
fd4abac5
TW
817 q = &txq->q;
818
3995bd93
JB
819 if (unlikely(iwl_queue_space(q) < q->high_mark))
820 goto drop_unlock;
821
822 if (ieee80211_is_data_qos(fc))
823 priv->stations[sta_id].tid[tid].tfds_in_queue++;
fd4abac5 824
fd4abac5
TW
825 /* Set up driver data for this TFD */
826 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
827 txq->txb[q->write_ptr].skb[0] = skb;
fd4abac5
TW
828
829 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b88b15df 830 out_cmd = txq->cmd[q->write_ptr];
c2acea8e 831 out_meta = &txq->meta[q->write_ptr];
fd4abac5
TW
832 tx_cmd = &out_cmd->cmd.tx;
833 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
834 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
835
836 /*
837 * Set up the Tx-command (not MAC!) header.
838 * Store the chosen Tx queue and TFD index within the sequence field;
839 * after Tx, uCode's Tx response will return this value so driver can
840 * locate the frame within the tx queue and do post-tx processing.
841 */
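	/*
	 * QUEUE_TO_SEQ() and INDEX_TO_SEQ() pack the Tx queue id and the TFD
	 * index into this 16-bit sequence value; the matching SEQ_TO_QUEUE()/
	 * SEQ_TO_INDEX() macros (used in iwl_tx_cmd_complete()) decode it again.
	 */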
842 out_cmd->hdr.cmd = REPLY_TX;
843 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
844 INDEX_TO_SEQ(q->write_ptr)));
845
846 /* Copy MAC header from skb into command buffer */
847 memcpy(tx_cmd->hdr, hdr, hdr_len);
848
df833b1d
RC
849
850 /* Total # bytes to be transmitted */
851 len = (u16)skb->len;
852 tx_cmd->len = cpu_to_le16(len);
853
854 if (info->control.hw_key)
855 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
856
857 /* TODO need this for burst mode later on */
858 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
20594eb0 859 iwl_dbg_log_tx_data_frame(priv, len, hdr);
df833b1d
RC
860
861 /* set is_hcca to 0; it probably will never be implemented */
b58ef214 862 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
df833b1d 863
22fdf3c9 864 iwl_update_stats(priv, true, fc, len);
fd4abac5
TW
865 /*
866 * Use the first empty entry in this queue's command buffer array
867 * to contain the Tx command and MAC header concatenated together
868 * (payload data will be in another buffer).
869 * Size of this varies, due to varying MAC header length.
870 * If end is not dword aligned, we'll have 2 extra bytes at the end
871 * of the MAC header (device reads on dword boundaries).
872 * We'll tell device about this padding later.
873 */
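	/*
	 * (len + 3) & ~3 below rounds len up to the next multiple of 4;
	 * len_org is then reused as a flag (1 if padding was added) so that
	 * TX_CMD_FLG_MH_PAD_MSK can be set further down.
	 */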
874 len = sizeof(struct iwl_tx_cmd) +
875 sizeof(struct iwl_cmd_header) + hdr_len;
876
877 len_org = len;
be1a71a1 878 firstlen = len = (len + 3) & ~3;
fd4abac5
TW
879
880 if (len_org != len)
881 len_org = 1;
882 else
883 len_org = 0;
884
df833b1d
RC
885 /* Tell NIC about any 2-byte padding after MAC header */
886 if (len_org)
887 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
888
fd4abac5
TW
889 /* Physical address of this Tx command's header (not MAC header!),
890 * within command buffer array. */
499b1883 891 txcmd_phys = pci_map_single(priv->pci_dev,
df833b1d 892 &out_cmd->hdr, len,
96891cee 893 PCI_DMA_BIDIRECTIONAL);
c2acea8e
JB
894 pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
895 pci_unmap_len_set(out_meta, len, len);
fd4abac5
TW
896 /* Add buffer containing Tx command and MAC(!) header to TFD's
897 * first entry */
7aaa1d79
SO
898 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
899 txcmd_phys, len, 1, 0);
fd4abac5 900
df833b1d
RC
901 if (!ieee80211_has_morefrags(hdr->frame_control)) {
902 txq->need_update = 1;
903 if (qc)
904 priv->stations[sta_id].tid[tid].seq_number = seq_number;
905 } else {
906 wait_write_ptr = 1;
907 txq->need_update = 0;
908 }
fd4abac5
TW
909
910 /* Set up TFD's 2nd entry to point directly to remainder of skb,
911 * if any (802.11 null frames have no payload). */
be1a71a1 912 secondlen = len = skb->len - hdr_len;
fd4abac5
TW
913 if (len) {
914 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
915 len, PCI_DMA_TODEVICE);
7aaa1d79
SO
916 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
917 phys_addr, len,
918 0, 0);
fd4abac5
TW
919 }
920
fd4abac5 921 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
df833b1d
RC
922 offsetof(struct iwl_tx_cmd, scratch);
923
924 len = sizeof(struct iwl_tx_cmd) +
925 sizeof(struct iwl_cmd_header) + hdr_len;
926 /* take back ownership of DMA buffer to enable update */
927 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
928 len, PCI_DMA_BIDIRECTIONAL);
fd4abac5 929 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
499b1883 930 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
fd4abac5 931
d2ee9cd2
RC
932 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
933 le16_to_cpu(out_cmd->hdr.sequence));
934 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
3d816c77
RC
935 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
936 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
fd4abac5
TW
937
938 /* Set up entry for this TFD in Tx byte-count array */
7b80ece4
RC
939 if (info->flags & IEEE80211_TX_CTL_AMPDU)
940 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
df833b1d
RC
941 le16_to_cpu(tx_cmd->len));
942
943 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
944 len, PCI_DMA_BIDIRECTIONAL);
fd4abac5 945
be1a71a1
JB
946 trace_iwlwifi_dev_tx(priv,
947 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
948 sizeof(struct iwl_tfd),
949 &out_cmd->hdr, firstlen,
950 skb->data + hdr_len, secondlen);
951
fd4abac5
TW
952 /* Tell device the write index *just past* this latest filled TFD */
953 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
954 ret = iwl_txq_update_write_ptr(priv, txq);
955 spin_unlock_irqrestore(&priv->lock, flags);
956
6ab10ff8
JB
957 /*
958 * At this point the frame is "transmitted" successfully
959 * and we will get a TX status notification eventually,
960 * regardless of the value of ret. "ret" only indicates
961 * whether or not we should update the write pointer.
962 */
963
964 /* avoid atomic ops if it isn't an associated client */
965 if (sta_priv && sta_priv->client)
966 atomic_inc(&sta_priv->pending_frames);
967
fd4abac5
TW
968 if (ret)
969 return ret;
970
143b09ef 971 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
fd4abac5
TW
972 if (wait_write_ptr) {
973 spin_lock_irqsave(&priv->lock, flags);
974 txq->need_update = 1;
975 iwl_txq_update_write_ptr(priv, txq);
976 spin_unlock_irqrestore(&priv->lock, flags);
143b09ef 977 } else {
e4e72fb4 978 iwl_stop_queue(priv, txq->swq_id);
fd4abac5 979 }
fd4abac5
TW
980 }
981
982 return 0;
983
984drop_unlock:
985 spin_unlock_irqrestore(&priv->lock, flags);
fd4abac5
TW
986 return -1;
987}
988EXPORT_SYMBOL(iwl_tx_skb);
989
990/*************** HOST COMMAND QUEUE FUNCTIONS *****/
991
992/**
993 * iwl_enqueue_hcmd - enqueue a uCode command
994 * @priv: device private data point
995 * @cmd: a point to the ucode command structure
996 *
 997 * The function returns < 0 values to indicate that the operation
 998 * failed. On success, it returns the index (> 0) of the command in the
 999 * command queue.
1000 */
1001int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1002{
1003 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1004 struct iwl_queue *q = &txq->q;
c2acea8e
JB
1005 struct iwl_device_cmd *out_cmd;
1006 struct iwl_cmd_meta *out_meta;
fd4abac5 1007 dma_addr_t phys_addr;
fd4abac5 1008 unsigned long flags;
f3674227
TW
1009 int len, ret;
1010 u32 idx;
1011 u16 fix_size;
fd4abac5
TW
1012
1013 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
1014 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
1015
 1016 /* If any of the command structures ends up being larger than
 1017 * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
 1018 * we will need to increase the size of the TFD entries */
1019 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
c2acea8e 1020 !(cmd->flags & CMD_SIZE_HUGE));
fd4abac5 1021
7812b167 1022 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
f2f21b49
RC
1023 IWL_WARN(priv, "Not sending command - %s KILL\n",
1024 iwl_is_rfkill(priv) ? "RF" : "CT");
fd4abac5
TW
1025 return -EIO;
1026 }
1027
c2acea8e 1028 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
2d237f71 1029 IWL_ERR(priv, "No space in command queue\n");
7812b167
WYG
1030 if (iwl_within_ct_kill_margin(priv))
1031 iwl_tt_enter_ct_kill(priv);
1032 else {
1033 IWL_ERR(priv, "Restarting adapter due to queue full\n");
1034 queue_work(priv->workqueue, &priv->restart);
1035 }
fd4abac5
TW
1036 return -ENOSPC;
1037 }
1038
1039 spin_lock_irqsave(&priv->hcmd_lock, flags);
1040
c2acea8e 1041 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
da99c4b6 1042 out_cmd = txq->cmd[idx];
c2acea8e
JB
1043 out_meta = &txq->meta[idx];
1044
8ce73f3a 1045 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
c2acea8e
JB
1046 out_meta->flags = cmd->flags;
1047 if (cmd->flags & CMD_WANT_SKB)
1048 out_meta->source = cmd;
1049 if (cmd->flags & CMD_ASYNC)
1050 out_meta->callback = cmd->callback;
fd4abac5
TW
1051
1052 out_cmd->hdr.cmd = cmd->id;
fd4abac5
TW
1053 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
1054
1055 /* At this point, the out_cmd now has all of the incoming cmd
1056 * information */
1057
1058 out_cmd->hdr.flags = 0;
1059 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
1060 INDEX_TO_SEQ(q->write_ptr));
c2acea8e 1061 if (cmd->flags & CMD_SIZE_HUGE)
9734cb23 1062 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
c2acea8e 1063 len = sizeof(struct iwl_device_cmd);
df833b1d 1064 len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
499b1883 1065
fd4abac5 1066
ded2ae7c
EK
1067#ifdef CONFIG_IWLWIFI_DEBUG
1068 switch (out_cmd->hdr.cmd) {
1069 case REPLY_TX_LINK_QUALITY_CMD:
1070 case SENSITIVITY_CMD:
e1623446 1071 IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
ded2ae7c
EK
1072 "%d bytes at %d[%d]:%d\n",
1073 get_cmd_string(out_cmd->hdr.cmd),
1074 out_cmd->hdr.cmd,
1075 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1076 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1077 break;
1078 default:
e1623446 1079 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
ded2ae7c
EK
1080 "%d bytes at %d[%d]:%d\n",
1081 get_cmd_string(out_cmd->hdr.cmd),
1082 out_cmd->hdr.cmd,
1083 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1084 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1085 }
1086#endif
fd4abac5
TW
1087 txq->need_update = 1;
1088
518099a8
SO
1089 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
1090 /* Set up entry in queue's byte count circular buffer */
1091 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
fd4abac5 1092
df833b1d
RC
1093 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
1094 fix_size, PCI_DMA_BIDIRECTIONAL);
c2acea8e
JB
1095 pci_unmap_addr_set(out_meta, mapping, phys_addr);
1096 pci_unmap_len_set(out_meta, len, fix_size);
df833b1d 1097
be1a71a1
JB
1098 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
1099
df833b1d
RC
1100 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1101 phys_addr, fix_size, 1,
1102 U32_PAD(cmd->len));
1103
fd4abac5
TW
1104 /* Increment and update queue's write index */
1105 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1106 ret = iwl_txq_update_write_ptr(priv, txq);
1107
1108 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
1109 return ret ? ret : idx;
1110}
1111
6ab10ff8
JB
1112static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1113{
1114 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1115 struct ieee80211_sta *sta;
1116 struct iwl_station_priv *sta_priv;
1117
1118 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1119 if (sta) {
1120 sta_priv = (void *)sta->drv_priv;
1121 /* avoid atomic ops if this isn't a client */
1122 if (sta_priv->client &&
1123 atomic_dec_return(&sta_priv->pending_frames) == 0)
1124 ieee80211_sta_block_awake(priv->hw, sta, false);
1125 }
1126
1127 ieee80211_tx_status_irqsafe(priv->hw, skb);
1128}
1129
17b88929
TW
1130int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1131{
1132 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1133 struct iwl_queue *q = &txq->q;
1134 struct iwl_tx_info *tx_info;
1135 int nfreed = 0;
1136
1137 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
15b1687c 1138 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
17b88929
TW
1139 "is out of range [0-%d] %d %d.\n", txq_id,
1140 index, q->n_bd, q->write_ptr, q->read_ptr);
1141 return 0;
1142 }
1143
499b1883
TW
1144 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1145 q->read_ptr != index;
1146 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
17b88929
TW
1147
1148 tx_info = &txq->txb[txq->q.read_ptr];
6ab10ff8 1149 iwl_tx_status(priv, tx_info->skb[0]);
17b88929 1150 tx_info->skb[0] = NULL;
17b88929 1151
972cf447
TW
1152 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1153 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1154
7aaa1d79 1155 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
17b88929
TW
1156 nfreed++;
1157 }
1158 return nfreed;
1159}
1160EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1161
1162
1163/**
1164 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
1165 *
 1166 * When the FW advances the 'R' index, all entries between the old and new
 1167 * 'R' index need to be reclaimed. As a result, some free space forms. If
 1168 * there is enough free space (> low mark), wake the stack that feeds us.
1169 */
499b1883
TW
1170static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1171 int idx, int cmd_idx)
17b88929
TW
1172{
1173 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1174 struct iwl_queue *q = &txq->q;
1175 int nfreed = 0;
1176
499b1883 1177 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
15b1687c 1178 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
17b88929 1179 "is out of range [0-%d] %d %d.\n", txq_id,
499b1883 1180 idx, q->n_bd, q->write_ptr, q->read_ptr);
17b88929
TW
1181 return;
1182 }
1183
499b1883
TW
1184 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
1185 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
17b88929 1186
499b1883 1187 if (nfreed++ > 0) {
15b1687c 1188 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
17b88929
TW
1189 q->write_ptr, q->read_ptr);
1190 queue_work(priv->workqueue, &priv->restart);
1191 }
da99c4b6 1192
17b88929
TW
1193 }
1194}
1195
1196/**
1197 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
1198 * @rxb: Rx buffer to reclaim
1199 *
 1200 * If an Rx buffer has an async callback associated with it, the callback
 1201 * will be executed. The attached skb (if present) will only be freed
 1202 * if the callback returns 1.
1203 */
1204void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1205{
2f301227 1206 struct iwl_rx_packet *pkt = rxb_addr(rxb);
17b88929
TW
1207 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1208 int txq_id = SEQ_TO_QUEUE(sequence);
1209 int index = SEQ_TO_INDEX(sequence);
17b88929 1210 int cmd_index;
9734cb23 1211 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
c2acea8e
JB
1212 struct iwl_device_cmd *cmd;
1213 struct iwl_cmd_meta *meta;
17b88929
TW
1214
 1215 /* If a Tx command is being handled and it isn't in the actual
 1216 * command queue, then a command routing bug has been introduced
 1217 * in the queue management code. */
55d6a3cd 1218 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
01ef9323
WT
1219 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
1220 txq_id, sequence,
1221 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
1222 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
ec741164 1223 iwl_print_hex_error(priv, pkt, 32);
55d6a3cd 1224 return;
01ef9323 1225 }
17b88929
TW
1226
1227 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
da99c4b6 1228 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
c2acea8e 1229 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
17b88929 1230
c33de625
RC
1231 pci_unmap_single(priv->pci_dev,
1232 pci_unmap_addr(meta, mapping),
1233 pci_unmap_len(meta, len),
1234 PCI_DMA_BIDIRECTIONAL);
1235
17b88929 1236 /* Input error checking is done when commands are added to queue. */
c2acea8e 1237 if (meta->flags & CMD_WANT_SKB) {
2f301227
ZY
1238 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
1239 rxb->page = NULL;
5696aea6 1240 } else if (meta->callback)
2f301227 1241 meta->callback(priv, cmd, pkt);
17b88929 1242
499b1883 1243 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
17b88929 1244
c2acea8e 1245 if (!(meta->flags & CMD_ASYNC)) {
17b88929
TW
1246 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1247 wake_up_interruptible(&priv->wait_command_queue);
1248 }
1249}
1250EXPORT_SYMBOL(iwl_tx_cmd_complete);
1251
30e553e3
TW
1252/*
1253 * Find first available (lowest unused) Tx Queue, mark it "active".
1254 * Called only when finding queue for aggregation.
1255 * Should never return anything < 7, because they should already
1256 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
1257 */
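/*
 * test_and_set_bit() below both checks and claims the queue bit in
 * txq_ctx_active_msk atomically, so two concurrent aggregation setups
 * cannot be handed the same queue.
 */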
1258static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1259{
1260 int txq_id;
1261
1262 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1263 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1264 return txq_id;
1265 return -1;
1266}
1267
1268int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1269{
1270 int sta_id;
1271 int tx_fifo;
1272 int txq_id;
1273 int ret;
1274 unsigned long flags;
1275 struct iwl_tid_data *tid_data;
30e553e3
TW
1276
1277 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1278 tx_fifo = default_tid_to_tx_fifo[tid];
1279 else
1280 return -EINVAL;
1281
39aadf8c 1282 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
e174961c 1283 __func__, ra, tid);
30e553e3
TW
1284
1285 sta_id = iwl_find_station(priv, ra);
3eb92969
WYG
1286 if (sta_id == IWL_INVALID_STATION) {
1287 IWL_ERR(priv, "Start AGG on invalid station\n");
30e553e3 1288 return -ENXIO;
3eb92969 1289 }
082e708a
RK
1290 if (unlikely(tid >= MAX_TID_COUNT))
1291 return -EINVAL;
30e553e3
TW
1292
1293 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
15b1687c 1294 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
30e553e3
TW
1295 return -ENXIO;
1296 }
1297
1298 txq_id = iwl_txq_ctx_activate_free(priv);
3eb92969
WYG
1299 if (txq_id == -1) {
1300 IWL_ERR(priv, "No free aggregation queue available\n");
30e553e3 1301 return -ENXIO;
3eb92969 1302 }
30e553e3
TW
1303
1304 spin_lock_irqsave(&priv->sta_lock, flags);
1305 tid_data = &priv->stations[sta_id].tid[tid];
1306 *ssn = SEQ_TO_SN(tid_data->seq_number);
1307 tid_data->agg.txq_id = txq_id;
45af8195 1308 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
30e553e3
TW
1309 spin_unlock_irqrestore(&priv->sta_lock, flags);
1310
1311 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1312 sta_id, tid, *ssn);
1313 if (ret)
1314 return ret;
1315
1316 if (tid_data->tfds_in_queue == 0) {
3eb92969 1317 IWL_DEBUG_HT(priv, "HW queue is empty\n");
30e553e3 1318 tid_data->agg.state = IWL_AGG_ON;
c951ad35 1319 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
30e553e3 1320 } else {
e1623446 1321 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
30e553e3
TW
1322 tid_data->tfds_in_queue);
1323 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1324 }
1325 return ret;
1326}
1327EXPORT_SYMBOL(iwl_tx_agg_start);
1328
1329int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1330{
1331 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1332 struct iwl_tid_data *tid_data;
45d42700 1333 int write_ptr, read_ptr;
30e553e3 1334 unsigned long flags;
30e553e3
TW
1335
1336 if (!ra) {
15b1687c 1337 IWL_ERR(priv, "ra = NULL\n");
30e553e3
TW
1338 return -EINVAL;
1339 }
1340
e6a6cf4c
RC
1341 if (unlikely(tid >= MAX_TID_COUNT))
1342 return -EINVAL;
1343
30e553e3
TW
1344 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1345 tx_fifo_id = default_tid_to_tx_fifo[tid];
1346 else
1347 return -EINVAL;
1348
1349 sta_id = iwl_find_station(priv, ra);
1350
a2f1cbeb
WYG
1351 if (sta_id == IWL_INVALID_STATION) {
1352 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
30e553e3 1353 return -ENXIO;
a2f1cbeb 1354 }
30e553e3 1355
827d42c9
JB
1356 if (priv->stations[sta_id].tid[tid].agg.state ==
1357 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1358 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
9b1cb21c 1359 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
827d42c9
JB
1360 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1361 return 0;
1362 }
1363
30e553e3 1364 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
827d42c9 1365 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
30e553e3
TW
1366
1367 tid_data = &priv->stations[sta_id].tid[tid];
1368 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1369 txq_id = tid_data->agg.txq_id;
1370 write_ptr = priv->txq[txq_id].q.write_ptr;
1371 read_ptr = priv->txq[txq_id].q.read_ptr;
1372
1373 /* The queue is not empty */
1374 if (write_ptr != read_ptr) {
e1623446 1375 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
30e553e3
TW
1376 priv->stations[sta_id].tid[tid].agg.state =
1377 IWL_EMPTYING_HW_QUEUE_DELBA;
1378 return 0;
1379 }
1380
e1623446 1381 IWL_DEBUG_HT(priv, "HW queue is empty\n");
30e553e3
TW
1382 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1383
1384 spin_lock_irqsave(&priv->lock, flags);
45d42700
WYG
1385 /*
 1386 * The only reason this call can fail is a queue number out of range,
 1387 * which can happen if uCode is reloaded and all the station
 1388 * information is lost. If it is outside the range, there is no need
 1389 * to deactivate the uCode queue; just return "success" to allow
 1390 * mac80211 to clean up its own data.
1391 */
1392 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
30e553e3
TW
1393 tx_fifo_id);
1394 spin_unlock_irqrestore(&priv->lock, flags);
1395
c951ad35 1396 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
30e553e3
TW
1397
1398 return 0;
1399}
1400EXPORT_SYMBOL(iwl_tx_agg_stop);
1401
1402int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1403{
1404 struct iwl_queue *q = &priv->txq[txq_id].q;
1405 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1406 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1407
1408 switch (priv->stations[sta_id].tid[tid].agg.state) {
1409 case IWL_EMPTYING_HW_QUEUE_DELBA:
1410 /* We are reclaiming the last packet of the */
1411 /* aggregated HW queue */
3fd07a1e
TW
1412 if ((txq_id == tid_data->agg.txq_id) &&
1413 (q->read_ptr == q->write_ptr)) {
30e553e3
TW
1414 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1415 int tx_fifo = default_tid_to_tx_fifo[tid];
e1623446 1416 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
30e553e3
TW
1417 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1418 ssn, tx_fifo);
1419 tid_data->agg.state = IWL_AGG_OFF;
c951ad35 1420 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
30e553e3
TW
1421 }
1422 break;
1423 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1424 /* We are reclaiming the last packet of the queue */
1425 if (tid_data->tfds_in_queue == 0) {
e1623446 1426 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
30e553e3 1427 tid_data->agg.state = IWL_AGG_ON;
c951ad35 1428 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
30e553e3
TW
1429 }
1430 break;
1431 }
1432 return 0;
1433}
1434EXPORT_SYMBOL(iwl_txq_check_empty);
30e553e3 1435
653fa4a0
EG
1436/**
1437 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
1438 *
1439 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1440 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1441 */
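/*
 * The BA bitmap from the uCode is indexed from the sequence number carried in
 * the BA response; the shift 'sh' computed below realigns it so that bit 0
 * corresponds to agg->start_idx, after which it is ANDed with the driver's
 * own bitmap of frames actually sent in the aggregate.
 */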
1442static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1443 struct iwl_ht_agg *agg,
1444 struct iwl_compressed_ba_resp *ba_resp)
1445
1446{
1447 int i, sh, ack;
1448 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1449 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1450 u64 bitmap;
1451 int successes = 0;
1452 struct ieee80211_tx_info *info;
1453
1454 if (unlikely(!agg->wait_for_ba)) {
15b1687c 1455 IWL_ERR(priv, "Received BA when not expected\n");
653fa4a0
EG
1456 return -EINVAL;
1457 }
1458
1459 /* Mark that the expected block-ack response arrived */
1460 agg->wait_for_ba = 0;
e1623446 1461 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
653fa4a0
EG
1462
1463 /* Calculate shift to align block-ack bits with our Tx window bits */
3fd07a1e 1464 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
653fa4a0
EG
1465 if (sh < 0) /* tbw something is wrong with indices */
1466 sh += 0x100;
1467
1468 /* don't use 64-bit values for now */
1469 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1470
1471 if (agg->frame_count > (64 - sh)) {
e1623446 1472 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
653fa4a0
EG
1473 return -1;
1474 }
1475
1476 /* check for success or failure according to the
1477 * transmitted bitmap and block-ack bitmap */
1478 bitmap &= agg->bitmap;
1479
1480 /* For each frame attempted in aggregation,
1481 * update driver's record of tx frame's status. */
1482 for (i = 0; i < agg->frame_count ; i++) {
4aa41f12 1483 ack = bitmap & (1ULL << i);
653fa4a0 1484 successes += !!ack;
e1623446 1485 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
c3056065 1486 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
653fa4a0
EG
1487 agg->start_idx + i);
1488 }
1489
1490 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1491 memset(&info->status, 0, sizeof(info->status));
91a55ae6 1492 info->flags |= IEEE80211_TX_STAT_ACK;
653fa4a0
EG
1493 info->flags |= IEEE80211_TX_STAT_AMPDU;
1494 info->status.ampdu_ack_map = successes;
1495 info->status.ampdu_ack_len = agg->frame_count;
1496 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1497
e1623446 1498 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
653fa4a0
EG
1499
1500 return 0;
1501}
1502
1503/**
1504 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1505 *
1506 * Handles block-acknowledge notification from device, which reports success
1507 * of frames sent via aggregation.
1508 */
1509void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1510 struct iwl_rx_mem_buffer *rxb)
1511{
2f301227 1512 struct iwl_rx_packet *pkt = rxb_addr(rxb);
653fa4a0 1513 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
653fa4a0
EG
1514 struct iwl_tx_queue *txq = NULL;
1515 struct iwl_ht_agg *agg;
3fd07a1e
TW
1516 int index;
1517 int sta_id;
1518 int tid;
653fa4a0
EG
1519
1520 /* "flow" corresponds to Tx queue */
1521 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1522
1523 /* "ssn" is start of block-ack Tx window, corresponds to index
1524 * (in Tx queue's circular buffer) of first TFD/frame in window */
1525 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1526
1527 if (scd_flow >= priv->hw_params.max_txq_num) {
15b1687c
WT
1528 IWL_ERR(priv,
1529 "BUG_ON scd_flow is bigger than number of queues\n");
653fa4a0
EG
1530 return;
1531 }
1532
1533 txq = &priv->txq[scd_flow];
3fd07a1e
TW
1534 sta_id = ba_resp->sta_id;
1535 tid = ba_resp->tid;
1536 agg = &priv->stations[sta_id].tid[tid].agg;
653fa4a0
EG
1537
1538 /* Find index just before block-ack window */
1539 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1540
1541 /* TODO: Need to get this copy more safely - now good for debug */
1542
e1623446 1543 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
653fa4a0
EG
1544 "sta_id = %d\n",
1545 agg->wait_for_ba,
e174961c 1546 (u8 *) &ba_resp->sta_addr_lo32,
653fa4a0 1547 ba_resp->sta_id);
e1623446 1548 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
653fa4a0
EG
1549 "%d, scd_ssn = %d\n",
1550 ba_resp->tid,
1551 ba_resp->seq_ctl,
1552 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1553 ba_resp->scd_flow,
1554 ba_resp->scd_ssn);
e1623446 1555 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
653fa4a0
EG
1556 agg->start_idx,
1557 (unsigned long long)agg->bitmap);
1558
1559 /* Update driver's record of ACK vs. not for each frame in window */
1560 iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1561
1562 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1563 * block-ack window (we assume that they've been successfully
1564 * transmitted ... if not, it's too late anyway). */
1565 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1566 /* calculate mac80211 ampdu sw queue to wake */
653fa4a0 1567 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
3fd07a1e
TW
1568 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1569
1570 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1571 priv->mac80211_registered &&
1572 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
e4e72fb4 1573 iwl_wake_queue(priv, txq->swq_id);
3fd07a1e
TW
1574
1575 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
653fa4a0
EG
1576 }
1577}
1578EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
1579
994d31f7 1580#ifdef CONFIG_IWLWIFI_DEBUG
a332f8d6
TW
1581#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1582
1583const char *iwl_get_tx_fail_reason(u32 status)
1584{
1585 switch (status & TX_STATUS_MSK) {
1586 case TX_STATUS_SUCCESS:
1587 return "SUCCESS";
1588 TX_STATUS_ENTRY(SHORT_LIMIT);
1589 TX_STATUS_ENTRY(LONG_LIMIT);
1590 TX_STATUS_ENTRY(FIFO_UNDERRUN);
1591 TX_STATUS_ENTRY(MGMNT_ABORT);
1592 TX_STATUS_ENTRY(NEXT_FRAG);
1593 TX_STATUS_ENTRY(LIFE_EXPIRE);
1594 TX_STATUS_ENTRY(DEST_PS);
1595 TX_STATUS_ENTRY(ABORTED);
1596 TX_STATUS_ENTRY(BT_RETRY);
1597 TX_STATUS_ENTRY(STA_INVALID);
1598 TX_STATUS_ENTRY(FRAG_DROPPED);
1599 TX_STATUS_ENTRY(TID_DISABLE);
1600 TX_STATUS_ENTRY(FRAME_FLUSHED);
1601 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
1602 TX_STATUS_ENTRY(TX_LOCKED);
1603 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
1604 }
1605
1606 return "UNKNOWN";
1607}
1608EXPORT_SYMBOL(iwl_get_tx_fail_reason);
1609#endif /* CONFIG_IWLWIFI_DEBUG */