iwlagn: warn about buggy fw that doesn't set SEQ_RX_FRAME
[deliverable/linux.git] drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

/* TODO: remove include to iwl-dev.h */
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

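/*
 * Note on the byte-count table encoding above: each 16-bit entry packs a
 * 12-bit byte count (frame length plus CRC/delimiter and any crypto
 * overhead) into bits 0-11 and the 4-bit station id into bits 12-15. The
 * first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past TFD_QUEUE_SIZE_MAX
 * so the scheduler can read a contiguous window when the circular buffer
 * wraps. Worked example (illustrative): byte_cnt = 100 on a CCMP queue
 * with sta_id 2 gives len = 100 + 4 + 4 + CCMP_MIC_LEN and
 * bc_ent = cpu_to_le16((len & 0xFFF) | (2 << 12)).
 */
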
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

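/*
 * Note: the value written to HBUS_TARG_WRPTR encodes both the new write
 * index (low byte) and the queue number (bits 8 and up). For example
 * (illustrative), write_ptr 5 on queue 4 is written as 5 | (4 << 8) =
 * 0x0405.
 */
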
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

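/*
 * Note on the TB layout handled by the helpers above: each Tx buffer
 * descriptor packs a 36-bit DMA address and a 12-bit length into 48 bits.
 * 'lo' holds address bits 0-31; the low nibble of 'hi_n_len' holds address
 * bits 32-35 and its upper 12 bits hold the length. The double shift
 * ((addr >> 16) >> 16) avoids an undefined 32-bit shift when dma_addr_t is
 * only 32 bits wide. Worked example (illustrative): addr = 0x912345678,
 * len = 64 gives lo = 0x12345678 and hi_n_len = (64 << 4) | 0x9 = 0x409.
 */
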
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

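/*
 * Usage note (illustrative): callers build a TFD incrementally. The first
 * chunk is attached with reset set, which zeroes the descriptor; further
 * chunks are appended with reset clear, e.g.:
 *
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
 *	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, secondlen, 0);
 */
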
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

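/*
 * Worked example (illustrative): with n_window = 64 and read_ptr ==
 * write_ptr (an empty queue), s starts at 0, the wrap adjustment adds
 * n_window, and the 2-entry reserve leaves 62 usable slots.
 */
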
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

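/*
 * Note (illustrative): for example, slots_num = 64 yields low_mark = 16
 * and high_mark = 8; see the queue theory-of-operation comment above for
 * how the two marks gate stopping and resuming the queue.
 */
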
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

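/*
 * Note: invalidation mirrors the update path above, but stores a byte
 * count of 1 for the reclaimed entry (including its duplicate past
 * TFD_QUEUE_SIZE_MAX) rather than clearing it to zero.
 */
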
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
				       u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}

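/*
 * Note: the scheduler's translation table packs two 16-bit RA/TID map
 * entries per 32-bit word, so odd-numbered queues occupy the upper half
 * of their word and even-numbered queues the lower half; the
 * read-modify-write above preserves the neighbouring queue's entry.
 */
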
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

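/*
 * Note: frame_limit is programmed into both the window-size and the
 * frame-limit fields of the queue's second scheduler context word, so the
 * one value bounds both the BA window and how many frames may go into a
 * single aggregate.
 */
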
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}

void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	int txq_id;

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return -EINVAL;
	}

	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(trans, "Stopping AGG while state not ON"
			 " or starting\n");
	}

	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
		trans->shrd->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(trans, "HW queue is empty\n");
turn_off:
	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&trans->shrd->sta_lock);
	spin_lock(&trans->shrd->lock);

	iwl_trans_pcie_txq_agg_disable(trans, txq_id);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);

	return 0;
}

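/*
 * Note on the aggregation state machine used above: a session started or
 * stopped while frames are still queued first passes through
 * IWL_EMPTYING_HW_QUEUE_ADDBA (draining before the session really starts)
 * or IWL_EMPTYING_HW_QUEUE_DELBA (draining before teardown); only once the
 * read and write pointers meet is the queue reconfigured and the state
 * settled at IWL_AGG_ON or IWL_AGG_OFF.
 */
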
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
				     phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

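/*
 * Note on IWL_HCMD_DFL_NOCOPY handling above: copied fragments are packed
 * together with the command header into the queue's preallocated buffer
 * and mapped as the first TB of the TFD; each NOCOPY fragment is DMA
 * mapped in place and appended as its own TB. This is why NOCOPY fragments
 * must come last: once the copy loop hits one, the copy stage stops.
 */
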
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			"index %d is out of range [0-%d] %d %d.\n", __func__,
			txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

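/*
 * Note: on the command queue exactly one entry should be reclaimed per
 * response, so the loop above triggers iwlagn_fw_error() if it ever has to
 * skip entries (nfreed > 0), since that means a command completion was
 * lost.
 */
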
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->shrd->cmd_queue, sequence,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(trans->shrd, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

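/*
 * Note: for CMD_WANT_SKB commands the Rx page holding the response is
 * handed off to the waiting caller via meta->source->reply_page, and
 * rxb->page is set to NULL so the Rx path does not recycle it; the caller
 * is then responsible for freeing the page (see the fail path in
 * iwl_send_cmd_sync below).
 */
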
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_shared *shrd,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(shrd->trans, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(shrd->trans, "back from %s (0x%08X)\n",
				  get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(shrd->trans, "back from %s (0x%08X)\n",
			     get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

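/*
 * Note on the synchronous path above: STATUS_HCMD_ACTIVE is set before the
 * command is enqueued and cleared either by iwl_tx_cmd_complete() (which
 * also wakes wait_command_queue) or by the timeout path here. On timeout,
 * the CMD_WANT_SKB flag is cancelled in the queue entry so a late response
 * cannot write through the then-stale meta->source pointer.
 */
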
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}

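/*
 * Usage sketch (illustrative, not taken from this file): a caller with a
 * single flat command buffer can send it synchronously as a one-fragment
 * host command, e.g.:
 *
 *	struct iwl_statistics_cmd stats_cmd = { .configuration_flags = 0 };
 *	ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_STATISTICS_CMD,
 *					  CMD_SYNC, sizeof(stats_cmd),
 *					  &stats_cmd);
 */
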
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			"last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
		freed++;
	}
	return freed;
}