/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCM_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
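
/*
 * Worked example (arbitrary values, for illustration only): if the
 * adjusted len works out to 0x064 and sta_id is 5, bc_ent becomes
 * cpu_to_le16((0x064 & 0xFFF) | (5 << 12)) == cpu_to_le16(0x5064) --
 * the low 12 bits carry the byte count, the top 4 bits the station id.
 */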
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
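
/*
 * Illustration (arbitrary address): for addr = 0x9ABCD1234ULL and
 * len = 40, iwl_tfd_set_tb() stores tb->lo = 0xABCD1234 and
 * tb->hi_n_len = cpu_to_le16((40 << 4) | 0x9) -- bits 32-35 of the
 * 36-bit DMA address share a le16 with the 12-bit length, and
 * iwl_tfd_tb_get_addr()/iwl_tfd_tb_get_len() unpack them again.
 */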
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * rises back above the high mark, the Tx queue is resumed.
 *
 ***************************************************/
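
/*
 * Illustration (numbers are arbitrary): with a 256-entry circular
 * buffer, read_ptr == write_ptr means the queue is empty; because two
 * entries are always kept in reserve, the driver treats the queue as
 * full once only two free slots remain, so the empty and full states
 * are never ambiguous.
 */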
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
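
/*
 * Worked example (arbitrary state): with n_bd = n_window = 64,
 * read_ptr = 10 and write_ptr = 50, s starts at -40, wraps up to 24
 * free entries, and the 2-entry reserve leaves 22 usable slots.
 */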
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
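
/*
 * Example (hypothetical sizing): iwl_queue_init(q, 256, 64, id) yields
 * low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8; the clamps above
 * only kick in for windows smaller than 16 slots.
 */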
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}
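
/*
 * Example (arbitrary queue): for txq_id = 10 and index = 0x105, the
 * value written to HBUS_TARG_WRPTR is (0x105 & 0xff) | (10 << 8) =
 * 0x0a05 -- the write pointer in the low byte, the queue id above it.
 */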
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(priv), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u8 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];

	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(priv), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(priv), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(priv), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	priv->txq[txq_id].sta_id = sta_id;
	priv->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&priv(trans)->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int txq_id;
	struct iwl_priv *priv = priv(trans);

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);

	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(trans, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
	} else {
		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
			     " queue\n", tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);

	return 0;
}
int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
{
	struct iwl_trans *trans = trans(priv);
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(priv).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(priv).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(priv), SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = 0;
	priv->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], 0, 0);

	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}
	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  priv->txq[trans->shrd->cmd_queue].q.read_ptr,
		  priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}
const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";
	}
}
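
/*
 * Note: IWL_CMD(x) is expected to expand to "case x: return #x;" (the
 * macro is defined elsewhere in the driver), so each opcode above maps
 * to its printable name.
 */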
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	}
#endif
}
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}
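
/*
 * Usage sketch (not taken from this file): a caller holding a filled
 * struct iwl_bt_cmd 'bt_cmd' could issue it synchronously with
 *
 *	iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG, CMD_SYNC,
 *				    sizeof(bt_cmd), &bt_cmd);
 *
 * which wraps the buffer in a one-fragment iwl_host_cmd as above.
 */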
/* Frees buffers until index _not_ inclusive */
void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
	}
}