iwlwifi: don't WARN when a non empty queue is disabled
drivers/net/wireless/iwlwifi/pcie/tx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *) txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

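/*
 * Helpers below pack and unpack TX buffer (TB) entries inside a TFD.  Each TB
 * carries a DMA address of up to 36 bits and a 12-bit length: the low 32
 * address bits live in tb->lo, while the top 4 address bits and the length
 * share the hi_n_len field (length occupies the upper 12 bits).
 */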
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

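/*
 * iwl_unmap_tfd - Tear down the DMA mappings referenced by one TFD: the first
 * TB (the command/header, always mapped bidirectionally) via the unmap info
 * stored in @meta, and any remaining TBs in the direction given by @dma_dir.
 * The TFD's TB count is reset to zero afterwards.
 */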
static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);

	tfd->num_tbs = 0;
}

/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
		      dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

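/*
 * iwlagn_txq_attach_buf_to_tfd - Append one DMA chunk (@addr/@len) as the
 * next TB of the TFD at the queue's write pointer, optionally zeroing the TFD
 * first when @reset is set.  Fails if the TFD already holds IWL_NUM_OF_TBS
 * chunks or if the address does not fit in 36 bits.
 */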
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, free space becomes less than the low mark, the Tx queue is
 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and free space
 * rises above the high mark, the Tx queue is resumed.
 *
 ***************************************************/

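/*
 * Worked example for iwl_queue_space() below (illustrative values only):
 * with n_bd = 256, n_window = 64, read_ptr = 5 and write_ptr = 10, s starts
 * at 5 - 10 = -5; read_ptr is not greater than write_ptr, so n_bd is not
 * subtracted; since s <= 0, n_window is added, giving 59; the reserve of 2
 * is then taken off, so 57 free slots are reported.
 */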
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

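/*
 * iwlagn_txq_inval_byte_cnt_tbl - Overwrite the scheduler byte-count table
 * entry (and its duplicate, when applicable) for the TFD being reclaimed at
 * the queue's read pointer.
 */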
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

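/*
 * iwl_txq_set_ratid_map - Record the receiver-address/TID pair for @txq_id in
 * the scheduler's translation table.  Two queues share each 32-bit table
 * word; the queue number's parity selects the upper or lower half-word.
 */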
static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

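/*
 * iwl_trans_pcie_txq_enable - Activate a TX queue in the scheduler: mark it
 * used, configure chaining and (for a valid station) the RA/TID aggregation
 * mapping, point the read/write pointers at @ssn, and program the window
 * size and frame limit before switching the queue onto the requested FIFO.
 */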
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			   ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			   ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

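/*
 * iwl_trans_pcie_txq_disable - Deactivate a TX queue in the scheduler and
 * clear its status area in SRAM.  Warns and bails out if the queue was not
 * marked as used.
 */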
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwl_txq_set_inactive(trans, txq_id);

	_iwl_write_targ_mem_dwords(trans, stts_addr,
				   zero_val, ARRAY_SIZE(zero_val));

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
		 cmd->id, copy_size))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
		cmd_pos += cmd->len[i];
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
			       &out_cmd->hdr, copy_size);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

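/*
 * iwl_queue_progress - Kick the stuck-queue watchdog: delete the timer once
 * the queue has drained, otherwise push its expiry forward since the queue
 * is still making progress.
 */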
static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
				      struct iwl_tx_queue *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}

	}

	iwl_queue_progress(trans_pcie, txq);
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 trans_pcie_get_cmd_string(trans_pcie,
							   cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       trans_pcie_get_cmd_string(trans_pcie,
							 cmd->hdr.cmd));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

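/*
 * iwl_send_cmd_async - Fire-and-forget variant of host command submission:
 * the command is queued and the function returns without waiting for the
 * response, so CMD_WANT_SKB (which needs a caller to collect the reply) is
 * rejected.
 */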
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

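/*
 * iwl_send_cmd_sync - Enqueue a host command and sleep until the completion
 * path clears HCMD_ACTIVE, or until HOST_COMPLETE_TIMEOUT expires.  On
 * timeout or a missing response, the CMD_WANT_SKB request is cancelled and
 * any response buffer already attached to the command is freed.
 */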
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

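/*
 * iwl_trans_pcie_send_cmd - Entry point for host commands: dispatch to the
 * asynchronous or synchronous path depending on the CMD_ASYNC flag.
 */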
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

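/*
 * iwl_tx_queue_reclaim - Reclaim completed entries on a data TX queue: each
 * reclaimed slot has its byte-count table entry and TFD released, the
 * detached skbs are handed to the caller on @skbs, and the number of frames
 * freed is returned.  Must not be used on the command queue.
 */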
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/*Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}

	iwl_queue_progress(trans_pcie, txq);

	return freed;
}