/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "dvm/commands.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
	 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < TFD_QUEUE_SIZE_MAX)
		max = q->n_window;
	else
		max = TFD_QUEUE_SIZE_MAX - 1;

	/*
	 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
	 * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
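
/*
 * Worked example of the wrap arithmetic above (assuming TFD_QUEUE_SIZE_MAX
 * is 256): with write_ptr = 5 and read_ptr = 250, used = (5 - 250) & 255 = 11,
 * so 11 entries are in use even though the write pointer has already wrapped.
 */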
/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
	q->n_window = slots_num;
	q->id = id;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
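
/*
 * How the marks are used further down in this file: iwl_trans_pcie_reclaim()
 * wakes a stopped queue once free space rises above low_mark, and
 * iwl_trans_pcie_tx() stops a queue once free space drops below high_mark.
 */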
static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
				  struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static void iwl_pcie_txq_stuck_timer(unsigned long data)
{
	struct iwl_txq *txq = (void *)data;
	struct iwl_queue *q = &txq->q;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
	u32 scd_sram_addr = trans_pcie->scd_base_addr +
				SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	u8 buf[16];
	int i;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
			iwl_read_direct32(trans, FH_TX_TRB_REG(i)));

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans,
					     trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(i));

		if (i & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			i, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
}
/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.write_ptr].cmd->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}

	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
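
/*
 * The first TFD_QUEUE_SIZE_BC_DUP byte-count entries are mirrored past the
 * end of the table (at TFD_QUEUE_SIZE_MAX + ptr), both here and in
 * iwl_pcie_txq_inval_byte_cnt_tbl() below; presumably this lets the
 * scheduler read ahead across the circular-buffer wrap without special
 * casing (a hardware expectation, not something enforced by this driver).
 */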
static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->q.read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->q.id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
}
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = &trans_pcie->txq[i];

		spin_lock(&txq->lock);
		if (trans_pcie->txq[i].need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			trans_pcie->txq[i].need_update = false;
		}
		spin_unlock(&txq->lock);
	}
}
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				       dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
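
/*
 * Layout of a TB entry as used by the helpers above: tb->lo carries the low
 * 32 bits of the DMA address, while tb->hi_n_len packs the length into bits
 * 4..15 (len << 4) and, on systems with a 64-bit dma_addr_t, address bits
 * 32..35 into bits 0..3.
 */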
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the scratchbuf data */

	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}
/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->q.read_ptr;
	int idx = get_cmd_index(&txq->q, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
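
/*
 * Callers in this file build a TFD incrementally: the first TB (the scratch
 * buffer) is added with reset = true to zero the descriptor, and every
 * following TB is appended with reset = false.
 */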
static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
			      struct iwl_txq *txq, int slots_num,
			      u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	size_t scratchbuf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
			sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch));

	scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;

	txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
					      &txq->scratchbufs_dma,
					      GFP_KERNEL);
	if (!txq->scratchbufs)
		goto err_free_tfds;

	txq->q.id = txq_id;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
			     int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = false;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, q->read_ptr);
		iwl_pcie_txq_free_tfd(trans, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
	}
	txq->active = false;
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++) {
			kfree(txq->entries[i].cmd);
			kfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->q.dma_addr);
		txq->q.dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->scratchbufs) * txq->q.n_window,
				  txq->scratchbufs, txq->scratchbufs_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 */
static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo);

	/* Activate all Tx DMA/FIFO channels */
	iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = &trans_pcie->txq[txq_id];

		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
				   txq->q.dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->q.read_ptr = 0;
		txq->q.write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
}
/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, txq_id, ret;

	/* Turn off all Tx DMA fifos */
	spin_lock(&trans_pcie->irq_lock);

	iwl_pcie_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
			FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
		if (ret < 0)
			IWL_ERR(trans,
				"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
				ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock(&trans_pcie->irq_lock);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}
/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_pcie_txq_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
					slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
					 struct iwl_txq *txq)
{
	if (!trans_pcie->wd_timeout)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->q.read_ptr == txq->q.write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
}
/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
	struct iwl_queue *q = &txq->q;
	int last_to_free;

	/* This function is not meant to release cmd queue*/
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!txq->active) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (txq->q.read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->q.read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(tfd_num);

	if (!iwl_queue_used(q, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     q->read_ptr != tfd_num;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
			continue;

		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);

		txq->entries[txq->q.read_ptr].skb = NULL;

		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);

	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
		iwl_wake_queue(trans, txq);
out:
	spin_unlock_bh(&txq->lock);
}
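
/*
 * Typical use of iwl_trans_pcie_reclaim() (a sketch of the expected caller,
 * not code from this file): when the op_mode handles a TX status or
 * block-ack notification it derives the new ssn (the first sequence number
 * that has not been acknowledged yet), calls
 * iwl_trans_pcie_reclaim(trans, txq_id, ssn, &skbs) with an empty skb list,
 * and then consumes and frees the skbs placed on that list.
 */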
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	unsigned long flags;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, q->write_ptr, q->read_ptr);
			iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		}
	}

	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    q->read_ptr == q->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		WARN_ON(!trans_pcie->cmd_in_flight);
		trans_pcie->cmd_in_flight = false;
		__iwl_trans_pcie_clear_bit(trans,
					   CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(trans_pcie, txq);
}
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}
static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
					     u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
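
/*
 * Example of the macro above: sta_id = 2 and tid = 5 give
 * BUILD_RAxTID(2, 5) = (2 << 4) + 5 = 0x25.
 */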
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_pcie_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id >= 0) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
		trans_pcie->txq[txq_id].ampdu = true;
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));

		ssn = trans_pcie->txq[txq_id].q.read_ptr;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	trans_pcie->txq[txq_id].active = true;
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	iwl_pcie_txq_set_inactive(trans, txq_id);

	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
			    ARRAY_SIZE(zero_val));

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id].ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			copy = cmd->len[i];

		if (copy) {
			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_in_flight) {
		trans_pcie->cmd_in_flight = true;
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
			trans_pcie->cmd_in_flight = false;
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			idx = -EIO;
			goto out;
		}
	}

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
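
/*
 * Resulting TFD layout for a host command, as built by
 * iwl_pcie_enqueue_hcmd() above: TB0 is the per-slot scratch buffer holding
 * the first IWL_HCMD_SCRATCHBUF_SIZE bytes of the command, TB1 (if needed)
 * maps the remainder of the copied data, and any further TBs map the
 * NOCOPY/DUP fragments directly.
 */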
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->cmd_queue, sequence,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
		meta->source->handler_status = handler_status;
	}

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
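
/* i.e. a synchronous host command is given two seconds to complete.
 *
 * Usage sketch for the send paths below (hypothetical caller, not code from
 * this file): an op_mode fills a struct iwl_host_cmd and hands it to the
 * transport, e.g.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = REPLY_ECHO,		// any valid command id
 *		.flags = CMD_WANT_SKB,		// synchronous, keep the response
 *	};
 *
 *	if (!iwl_trans_pcie_send_hcmd(trans, &hcmd)) {
 *		// inspect hcmd.resp_pkt here
 *		iwl_free_resp(&hcmd);
 *	}
 *
 * CMD_ASYNC in .flags selects the fire-and-forget path instead.
 */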
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 get_cmd_string(trans_pcie, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
		struct iwl_queue *q = &txq->q;

		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			get_cmd_string(trans_pcie, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			q->read_ptr, q->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(trans_pcie, cmd->id));
		ret = -ETIMEDOUT;

		iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			get_cmd_string(trans_pcie, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	bool wait_write_ptr;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	spin_lock(&txq->lock);

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != q->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, q->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = ALIGN(len, 4);

	/* Tell NIC about any 2-byte padding after MAC header */
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr)
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
		else
			iwl_stop_queue(trans, txq);
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}