/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-shared.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	/* one u32 counter per interrupt cause */
};

/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
};
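
/*
 * Illustrative sketch only, not code from this driver: the NOTE above
 * means buffers cycle between the two lists. A buffer whose receive page
 * has not been allocated yet waits on rx_used; once a page is attached it
 * moves to rx_free, from where the restock path links it into queue[] for
 * the hardware. Assuming iwl_rx_mem_buffer carries a list_head named
 * "list", that move is simply:
 *
 *	list_move_tail(&rxb->list, &rxq->rx_free);
 */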

/*
 * This queue number is required for proper operation
 * because the ucode will stop/start the scheduler as
 * required.
 */
#define IWL_IPAN_MCAST_QUEUE	8

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at iwl-sta.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback)(struct iwl_shared *shrd,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int n_bd;		/* number of BDs in this queue */
	int write_ptr;		/* 1-st empty entry (index) host_w */
	int read_ptr;		/* last used entry (index) host_r */
	/* used for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;		/* safe queue window */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};
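
/*
 * Worked example of the window described above (illustrative, using the
 * command queue where n_bd = 256 and n_window = TFD_CMD_SLOTS = 32):
 * hardware index 100 lies in window N = 100 / 32 = 3, and the matching
 * software slot is 100 & (32 - 1) = 4, which is what get_cmd_index()
 * further down in this file computes. For the normal TX queues
 * n_window == n_bd == 256, so the mapping is the identity.
 */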

/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @bd: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @dma_addr_cmd: physical address of cmd/tx buffer array
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @sta_id: valid if sched_retry is set
 * @tid: valid if sched_retry is set
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct sk_buff **skbs;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 swq_id;
	u16 sta_id;
	u16 tid;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @ac_to_fifo: to what fifo is a specific AC mapped?
 * @ac_to_queue: to what tx queue is a specific AC mapped?
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: what queue is active
 * @queue_stopped: tracks what queue is stopped
 * @queue_stop_count: tracks what SW queue is stopped
 */
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;

	/* ICT table DMA addresses */
	dma_addr_t ict_tbl_dma;
	dma_addr_t aligned_ict_tbl_dma;

	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];

	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	atomic_t queue_stop_count[4];
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
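
/*
 * Typical use of the accessor above (as in the inline helpers further
 * down in this file):
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 */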

/*****************************************************
* RX
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q);

/*****************************************************
* ICT
******************************************************/
int iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
irqreturn_t iwl_isr_ict(int irq, void *data);

/*****************************************************
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
			      struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int __must_check iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id,
					     u32 flags, u16 len,
					     const void *data);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
			 struct iwl_rx_mem_buffer *rxb);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt);
void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry);
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx,
				 int sta_id, int tid, int frame_limit);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);

/*****************************************************
* Error handling
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			   char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(bus(trans), CSR_INT, 0xffffffff);
	iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
	iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
}

/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
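
/*
 * Worked example of the encoding above: ac = 2 on HW queue 5 gives
 * swq_id = (5 << 2) | 2 = 0x16. iwl_wake_queue() and iwl_stop_queue()
 * below recover the parts with ac = swq_id & 3 = 2 and
 * hwq = (swq_id >> 2) & 0x1f = 5.
 */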

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
			iwl_wake_sw_queue(priv(trans), ac);
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
			iwl_stop_sw_queue(priv(trans), ac);
}

#ifdef ieee80211_stop_queue
#undef ieee80211_stop_queue
#endif

#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue

#ifdef ieee80211_wake_queue
#undef ieee80211_wake_queue
#endif

#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue

static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
					int txq_id)
{
	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
					  int txq_id)
{
	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
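
/*
 * Worked example of the wrap-around handling above: with read_ptr = 250
 * and write_ptr = 5 on a 256-entry queue, the occupied indices are
 * 250..255 and 0..4, so iwl_queue_used(q, 2) is true while
 * iwl_queue_used(q, 100) is false.
 */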

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN	4
#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN	5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX		5
#define IWL_TX_FIFO_UNUSED	-1

/* AUX (TX during scan dwell) queue */
#define IWL_AUX_QUEUE		10

#endif /* __iwl_trans_int_pcie_h__ */