/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));
		} else {
			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
		}
	}

	txq->need_update = 0;
}
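
/*
 * The value written to HBUS_TARG_WRPTR above packs the queue index into
 * bits 8-15 and the new write pointer into bits 0-7, so e.g. advancing
 * queue 4 to entry 0x2a writes 0x042a.
 */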

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
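
/*
 * A transfer buffer descriptor packs a 36-bit DMA address and a 12-bit
 * length into 6 bytes: tb->lo holds address bits 0-31, while hi_n_len
 * keeps address bits 32-35 in its low nibble and the length in the upper
 * 12 bits. For example, addr = 0x987654321 and len = 48 give
 * tb->lo = 0x87654321 and hi_n_len = (48 << 4) | 0x9 = 0x309.
 */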

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	lockdep_assert_held(&txq->lock);

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[index];

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb != NULL) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), the Tx queue is
 * resumed once the free space rises above the high mark.
 *
 ***************************************************/
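
/*
 * Example of the 2-entry reserve mentioned above: when read_ptr equals
 * write_ptr the ring could be either completely empty or completely full,
 * so the space accounting below always keeps at least two entries unused;
 * a 64-entry window therefore advertises at most 62 usable slots.
 */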

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water marks and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
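
/*
 * For example, a queue initialized with slots_num = 64 gets
 * low_mark = 16 and high_mark = 8; the clamps above only matter for very
 * small windows (low_mark never drops below 4, high_mark below 2).
 */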

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];

	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
			 hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
				      tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
				      &trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwl_op_mode_nic_error(trans->op_mode);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
				     phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
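
/*
 * Layout built by iwl_enqueue_hcmd(): transfer buffer 0 of the TFD holds
 * the command header plus every fragment that had to be copied
 * (copy_size bytes, mapped DMA_BIDIRECTIONAL), while each
 * IWL_HCMD_DFL_NOCOPY fragment gets its own transfer buffer mapped
 * straight from the caller's memory. The sequence field packs the command
 * queue number (QUEUE_TO_SEQ) together with the write pointer
 * (INDEX_TO_SEQ); iwl_tx_cmd_complete() below unpacks it again with
 * SEQ_TO_QUEUE/SEQ_TO_INDEX to locate the matching queue entry.
 */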

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->shrd->cmd_queue, sequence,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb->page;

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)
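
/*
 * Synchronous commands below wait up to HOST_COMPLETE_TIMEOUT (two
 * seconds, independent of the HZ value) for iwl_tx_cmd_complete() to
 * clear the HCMD_ACTIVE bit before reporting a timeout.
 */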

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(cmd->id));

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		return -EIO;
	}

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);

	if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
		struct iwl_tx_queue *txq =
			&trans_pcie->txq[trans->shrd->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: time out after %dms.\n",
			get_cmd_string(cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_DEBUG_QUIET_RFKILL(trans,
			"Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->id));
		ret = -ETIMEDOUT;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_pages(trans->shrd, cmd->_rx_page_addr);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	    (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);