/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
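/*
 * Example (illustrative, with hypothetical values): a byte-count table entry
 * packs the frame length into the low 12 bits and the station id into the
 * top 4 bits.  For a 200-byte frame sent for station 3:
 *
 *	len    = 200 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;	// 208
 *	bc_ent = cpu_to_le16((len & 0xFFF) | (3 << 12));		// 0x30d0
 *
 * The first TFD_QUEUE_SIZE_BC_DUP entries are written a second time past
 * TFD_QUEUE_SIZE_MAX, presumably so the scheduler can read a window that
 * crosses the circular-buffer wrap point without wrapping itself.
 */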
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);
		/* if we're trying to save power */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup, GP1 = 0x%x\n",
					txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
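/*
 * Example (illustrative, with made-up values): a TB stores a 36-bit DMA
 * address split across 'lo' (bits 0-31) and the low nibble of 'hi_n_len'
 * (bits 32-35), with the 12-bit length in the upper bits of 'hi_n_len'.
 * For addr = 0x8_1234_5678 and len = 300, iwl_tfd_set_tb() ends up with:
 *
 *	tb->lo       == cpu_to_le32(0x12345678);
 *	tb->hi_n_len == cpu_to_le16((300 << 4) | 0x8);	// 0x12c8
 */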
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
		      dma_dir);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
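/*
 * Worked example (illustrative numbers only): with a 256-entry TFD ring
 * (n_bd = 256), read_ptr = 250 and write_ptr = 10 mean
 * (10 - 250) mod 256 = 16 descriptors are in flight, and the 2-entry reserve
 * mentioned above keeps "full" distinguishable from "empty"
 * (read_ptr == write_ptr).  With n_window = 64, the queue is stopped when
 * free space drops below low_mark = 64 / 4 = 16 and resumed when it rises
 * above high_mark = 64 / 8 = 8 (see iwl_queue_init() below).
 */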
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
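/*
 * Usage sketch (illustrative; the exact caller is an assumption, not taken
 * from this file): a transmit queue ring is typically set up as
 *
 *	iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 *
 * where both the ring size and slots_num must be powers of two, as enforced
 * by the WARN_ON()s above.
 */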
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
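/*
 * Illustrative note (example value only): the second context dword programs
 * the same frame_limit into both the window-size and frame-limit fields, so
 * e.g. frame_limit = 64 writes
 *
 *	((64 << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
 *	 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
 *	((64 << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
 *	 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)
 *
 * i.e. a 64-frame window/limit for aggregation on this queue.
 */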
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

	iwl_txq_set_inactive(trans, txq_id);

	_iwl_write_targ_mem_dwords(trans, stts_addr,
				   zero_val, ARRAY_SIZE(zero_val));

	iwl_tx_queue_unmap(trans, txq_id);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmd->data[i], cmd->len[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					 IWL_HCMD_DFL_DUP))
			break;
		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
		cmd_pos += cmd->len[i];
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		const void *data = cmd->data[i];

		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	txq->need_update = 1;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
			       &out_cmd->hdr, copy_size);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
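/*
 * Usage sketch (illustrative only; REPLY_FOO and 'foo' are made-up names):
 * callers describe a host command with up to IWL_MAX_CMD_TFDS data chunks,
 * for example
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_FOO,
 *		.len = { sizeof(foo), },
 *		.data = { &foo, },
 *	};
 *
 * Large chunks may be tagged IWL_HCMD_DFL_NOCOPY (mapped in place) or
 * IWL_HCMD_DFL_DUP (duplicated with kmemdup() as above) in .dataflags[].
 */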
static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
				      struct iwl_tx_queue *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, q->n_bd,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}

	iwl_queue_progress(trans_pcie, txq);
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 trans_pcie_get_cmd_string(trans_pcie,
							   cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       trans_pcie_get_cmd_string(trans_pcie,
							 cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_HCMD_ACTIVE,
					   &trans_pcie->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_bit(STATUS_RFKILL, &trans_pcie->status))
		return -ERFKILL;

	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_send_cmd_sync(trans, cmd);
}
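/*
 * Usage sketch (illustrative): a synchronous caller that wants the response
 * packet back sets CMD_WANT_SKB and releases the response when done, e.g.
 *
 *	cmd.flags |= CMD_WANT_SKB;
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *	if (!ret) {
 *		... inspect cmd.resp_pkt ...
 *		iwl_free_resp(&cmd);
 *	}
 */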
/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, q->n_bd,
			q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
		freed++;
	}

	iwl_queue_progress(trans_pcie, txq);

	return freed;
}