/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-shared.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        struct list_head list;
};
/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};
/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: pool of iwl_rx_mem_buffer structures backing this queue
 * @queue: maps each rbd index to its iwl_rx_mem_buffer
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
        __le32 *bd;
        dma_addr_t bd_dma;
        struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
        struct list_head rx_free;
        struct list_head rx_used;
        int need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
};
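
/*
 * Illustrative sketch (not part of the driver API): per the NOTE above,
 * rx_free and rx_used act as a FIFO of iwl_rx_mem_buffers. A replenish
 * step would take a buffer off rx_used, attach a fresh page, and feed
 * it back to rx_free roughly like this. The helper name is hypothetical
 * and locking is the caller's problem in this sketch.
 */
static inline void __iwl_rxq_recycle_sketch(struct iwl_rx_queue *rxq,
                                            struct iwl_rx_mem_buffer *rxb)
{
        list_del(&rxb->list);           /* off rx_used; page now attached */
        list_add_tail(&rxb->list, &rxq->rx_free);
        rxq->free_count++;
}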
struct iwl_dma_ptr {
        dma_addr_t dma;
        void *addr;
        size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
        return ++index & (n_bd - 1);
}
/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
        return --index & (n_bd - 1);
}
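
/*
 * Worked example (illustrative only): because n_bd is a power of 2, the
 * mask replaces a modulo. With n_bd == 256 the mask is 0xff:
 */
static inline void __iwl_queue_wrap_example(void)
{
        int i = iwl_queue_inc_wrap(255, 256);   /* (256 & 0xff) == 0 */

        i = iwl_queue_dec_wrap(i, 256);         /* (-1 & 0xff) == 255 */
        (void)i;
}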
/*
 * This queue number is required for proper operation
 * because the ucode will stop/start the scheduler as
 * required.
 */
#define IWL_IPAN_MCAST_QUEUE    8
struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;

        DEFINE_DMA_UNMAP_ADDR(mapping);
        DEFINE_DMA_UNMAP_LEN(len);

        u32 flags;
};
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue; a worked example
 * follows get_cmd_index() below.
 */
struct iwl_queue {
        int n_bd;              /* number of BDs in this queue */
        int write_ptr;         /* 1-st empty entry (index) host_w */
        int read_ptr;          /* last used entry (index) host_r */
        /* use for monitoring and recovering the stuck queue */
        dma_addr_t dma_addr;   /* physical addr for BD's */
        int n_window;          /* safe queue window */
        u32 id;
        int low_mark;          /* low watermark, resume queue if free
                                * space more than this */
        int high_mark;         /* high watermark, stop queue if free
                                * space less than this */
};
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @bd: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @dma_addr_cmd: physical address of cmd/tx buffer array
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @sta_id: valid if sched_retry is set
 * @tid: valid if sched_retry is set
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
struct iwl_tx_queue {
        struct iwl_queue q;
        struct iwl_tfd *tfds;
        struct iwl_device_cmd **cmd;
        struct iwl_cmd_meta *meta;
        struct sk_buff **skbs;
        unsigned long time_stamp;
        u8 need_update;
        u8 sched_retry;
        u8 active;
        u8 swq_id;

        u16 sta_id;
        u16 tid;
};
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @irq: the irq number for the device
 * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @ac_to_fifo: to what fifo is a specific AC mapped?
 * @ac_to_queue: to what tx queue is a specific AC mapped?
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: what queue is active
 * @queue_stopped: bitmap tracking which HW queues are stopped
 * @queue_stop_count: per-AC count of stopped HW queues
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 */
struct iwl_trans_pcie {
        struct iwl_rx_queue rxq;
        struct work_struct rx_replenish;
        struct iwl_trans *trans;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        u32 inta;
        bool use_ict;
        bool irq_requested;
        struct tasklet_struct irq_tasklet;
        struct isr_statistics isr_stats;

        unsigned int irq;
        spinlock_t irq_lock;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;

        const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
        const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
        u8 mcast_queue[NUM_IWL_RXON_CTX];
        u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];

        struct iwl_tx_queue *txq;
        unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES       32
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
        atomic_t queue_stop_count[4];

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
        ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
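
/*
 * Usage sketch (illustrative only): the PCIe-specific state hangs off
 * the generic transport, so the helpers below recover it with a single
 * cast. The accessor name here is hypothetical.
 */
static inline struct isr_statistics *
__iwl_isr_stats_sketch(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        return &trans_pcie->isr_stats;
}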
/*****************************************************
* RX
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
                                   struct iwl_rx_queue *q);
/*****************************************************
* ICT
******************************************************/
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
irqreturn_t iwl_isr_ict(int irq, void *data);
/*****************************************************
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
                              struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
                         struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
                                       struct iwl_tx_queue *txq,
                                       u16 byte_cnt);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
                                  int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
                                   struct iwl_tx_queue *txq,
                                   int tx_fifo_id, int scd_retry);
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
                                 enum iwl_rxon_context_id ctx,
                                 int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                         int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
                         struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
/*****************************************************
* Error handling
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
                           char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);

        /* disable interrupts from uCode/NIC to host */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* acknowledge/clear/reset any interrupts still pending
         * from uCode or flow handler (Rx/Tx DMA) */
        iwl_write32(trans, CSR_INT, 0xffffffff);
        iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
        iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
        BUG_ON(ac > 3);   /* only have 2 bits */
        BUG_ON(hwq > 31); /* only use 5 bits */

        txq->swq_id = (hwq << 2) | ac;
}
static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
{
        return txq->swq_id & 0x3;
}
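
/*
 * Worked example (illustrative only): hwq 10 and ac 2 pack into
 * swq_id == (10 << 2) | 2 == 0x2a, and both fields decode back out.
 * The helper name is hypothetical.
 */
static inline void __iwl_swq_id_example(struct iwl_tx_queue *txq)
{
        iwl_set_swq_id(txq, 2, 10);             /* txq->swq_id == 0x2a */
        WARN_ON(iwl_get_queue_ac(txq) != 2);
        WARN_ON(((txq->swq_id >> 2) & 0x1f) != 10);
}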
static inline void iwl_wake_queue(struct iwl_trans *trans,
                                  struct iwl_tx_queue *txq, const char *msg)
{
        u8 queue = txq->swq_id;
        u8 ac = queue & 3;
        u8 hwq = (queue >> 2) & 0x1f;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
                if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
                        iwl_op_mode_queue_not_full(trans->op_mode, ac);
                        IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
                                            hwq, ac, msg);
                } else {
                        IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
                                            " stop count %d. %s",
                                            hwq, ac, atomic_read(&trans_pcie->
                                            queue_stop_count[ac]), msg);
                }
        }
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
                                  struct iwl_tx_queue *txq, const char *msg)
{
        u8 queue = txq->swq_id;
        u8 ac = queue & 3;
        u8 hwq = (queue >> 2) & 0x1f;
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
                if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
                        iwl_op_mode_queue_full(trans->op_mode, ac);
                        IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
                                            " stop count %d. %s",
                                            hwq, ac, atomic_read(&trans_pcie->
                                            queue_stop_count[ac]), msg);
                } else {
                        IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
                                            " stop count %d. %s",
                                            hwq, ac, atomic_read(&trans_pcie->
                                            queue_stop_count[ac]), msg);
                }
        } else {
                IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped. %s",
                                    hwq, msg);
        }
}
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
                                        int txq_id)
{
        set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
                                          int txq_id)
{
        clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
        return q->write_ptr >= q->read_ptr ?
                (i >= q->read_ptr && i < q->write_ptr) :
                !(i < q->read_ptr && i >= q->write_ptr);
}
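
/*
 * Worked example (illustrative only): with read_ptr == 250 and
 * write_ptr == 5 on a 256-entry ring, the used region wraps: indices
 * 250..255 and 0..4 are in use, while 5..249 are free.
 */
static inline int __iwl_queue_used_example(void)
{
        struct iwl_queue q = { .n_bd = 256, .read_ptr = 250, .write_ptr = 5 };

        return iwl_queue_used(&q, 252) && !iwl_queue_used(&q, 100); /* 1 */
}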
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
        return index & (q->n_window - 1);
}
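
/*
 * Worked example (illustrative only): on the command queue the HW ring
 * has TFD_TX_CMD_SLOTS (256) BDs but the SW window only TFD_CMD_SLOTS
 * (32) entries, so HW indices 3, 35, 67, ... all land on SW slot 3.
 * The helper name is hypothetical.
 */
static inline u8 __get_cmd_index_example(void)
{
        struct iwl_queue q = { .n_bd = TFD_TX_CMD_SLOTS,
                               .n_window = TFD_CMD_SLOTS };

        return get_cmd_index(&q, 35);   /* == 35 & 31 == 3 */
}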
#define IWL_TX_FIFO_BK          0       /* shared */
#define IWL_TX_FIFO_BE          1
#define IWL_TX_FIFO_VI          2       /* shared */
#define IWL_TX_FIFO_VO          3
#define IWL_TX_FIFO_BK_IPAN     IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN     4
#define IWL_TX_FIFO_VI_IPAN     IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN     5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX         5
#define IWL_TX_FIFO_UNUSED      -1
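
/*
 * Sketch (assumption, meant to mirror the AGN driver's BSS mapping):
 * an ac_to_fifo table indexed by AC lines the four ACs up with the
 * FIFO numbers above. The array name is hypothetical.
 */
static const u8 __maybe_unused __example_bss_ac_to_fifo[] = {
        IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
};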
/* AUX (TX during scan dwell) queue */
#define IWL_AUX_QUEUE           10

#endif /* __iwl_trans_int_pcie_h__ */