/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl;
	u8 sta_id;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
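
/*
 * Illustrative sketch (not from the driver): a byte-count table entry
 * packs the 12-bit length and the 4-bit station id into one 16-bit word.
 * With hypothetical values len = 0x123 and sta_id = 5:
 *
 *	u16 ent = (0x123 & 0xFFF) | (5 << 12);	yields 0x5123
 *	u16 len_back = ent & 0xFFF;		yields 0x123
 *	u8 sta_back = ent >> 12;		yields 5
 */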
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
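
/*
 * Illustrative sketch (hypothetical address, not from the driver): how a
 * 36-bit DMA address is split between tb->lo (low 32 bits) and the low
 * nibble of hi_n_len (bits 32-35), with the length in the upper 12 bits:
 *
 *	dma_addr_t addr = 0x9ABCD1234ULL;	36-bit example address
 *	u32 lo = (u32)addr;			0xABCD1234, goes to tb->lo
 *	u16 hi_n_len = (len << 4) |
 *		(((addr >> 16) >> 16) & 0xF);	high nibble = 0x9
 */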
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[index] = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
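
/*
 * Illustrative walk of iwl_queue_space() (hypothetical numbers): with
 * n_bd = 256, n_window = 64, read_ptr = 10 and write_ptr = 20:
 *
 *	s = 10 - 20;	gives -10 (read_ptr <= write_ptr, no n_bd adjustment)
 *	s += 64;	gives 54 (fold the deficit into the window)
 *	s -= 2;		gives 52 usable slots (two kept in reserve)
 */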
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
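
/*
 * Illustrative usage (hypothetical values): initialize a 256-entry
 * circular buffer with a 64-slot window; both must be powers of two.
 *
 *	struct iwl_queue q;
 *	int ret = iwl_queue_init(&q, 256, 64, 0);
 *	on success: q.low_mark == 16, q.high_mark == 8,
 *	q.read_ptr == q.write_ptr == 0
 */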
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}
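
/*
 * Illustrative sketch (hypothetical values): each 32-bit word of the SCD
 * translation table holds two queue entries; odd queue ids land in the
 * high half-word, even ids in the low half-word. For txq_id = 5 (odd)
 * and scd_q2ratid = 0x0123:
 *
 *	tbl_dw = (0x0123 << 16) | (tbl_dw & 0x0000FFFF);
 */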
static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
			   int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(priv), SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ctx->ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
void iwl_trans_pcie_txq_agg_setup(struct iwl_priv *priv,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid, int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	struct iwl_trans *trans = trans(priv);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(&priv->contexts[ctx], tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	tid_data = &priv->shrd->tid_data[sta_id][tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(priv), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(priv), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(priv), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	priv->txq[txq_id].sta_id = sta_id;
	priv->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&priv->shrd->lock, flags);
}
int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
{
	struct iwl_trans *trans = trans(priv);
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(priv).num_ampdu_queues <= txq_id)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(priv).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(priv), SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = 0;
	priv->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], 0, 0);

	return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));
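
	/*
	 * Illustrative sketch (not part of the driver): the sequence field
	 * lets the firmware echo back which slot a response belongs to.
	 * Assuming the usual QUEUE_TO_SEQ/INDEX_TO_SEQ layout (queue id in
	 * bits 8-12, index in bits 0-7), cmd_queue = 4 and write_ptr = 7:
	 *
	 *	u16 seq = (4 << 8) | 7;		yields 0x0407
	 *	SEQ_TO_QUEUE(seq)		yields 4
	 *	SEQ_TO_INDEX(seq)		yields 7
	 */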
	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
				     phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}
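
/*
 * Illustrative walk (hypothetical numbers): with n_bd = 32, read_ptr = 30
 * and idx = 1, the loop advances read_ptr through 30, 31, 0, 1 and stops
 * when it reaches iwl_queue_inc_wrap(1, 32) == 2. Only one entry should
 * ever be outstanding on the command queue, hence the "HCMD skipped"
 * error when more than one is reclaimed.
 */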
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  priv->txq[trans->shrd->cmd_queue].q.read_ptr,
		  priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}
const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
		IWL_CMD(REPLY_WOWLAN_PATTERNS);
		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
	default:
		return "UNKNOWN";
	}
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				  get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
			     get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	}
#endif
}
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	/* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				       " %s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
				u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_trans_pcie_send_cmd(trans, &cmd);
}
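
/*
 * Illustrative usage (hypothetical payload, a sketch rather than driver
 * code): send a fixed-length host command through the PDU helper, which
 * wraps it in a struct iwl_host_cmd and goes through the sync path.
 *
 *	u8 data[2] = { 0x01, 0x02 };
 *	int ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG,
 *					      CMD_SYNC, sizeof(data), data);
 */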
/* Frees buffers until index _not_ inclusive */
void iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			  struct sk_buff_head *skbs)
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return;
	}

	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
			   q->read_ptr, index);

	if (WARN_ON(!skb_queue_empty(skbs)))
		return;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
	}
}