/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
/* This file includes the declarations that are internal to the
 * trans_pcie layer */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};
/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: array of pointers to the buffers currently handed to the hardware
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write pointer value actually written to the device
 * @rx_free: list of free SKBs for use
 * @rx_used: list of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue and the lists above
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
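
/*
 * Editor's illustration (not the driver's actual replenish code): rx_free
 * and rx_used act as a FIFO of iwl_rx_mem_buffers.  A buffer handed to the
 * hardware is taken from rx_free and tracked in @queue; once its packet has
 * been processed it is parked on rx_used until a fresh page is attached and
 * it can be returned to rx_free, roughly:
 *
 *	struct iwl_rx_mem_buffer *rxb =
 *		list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
 *	list_del(&rxb->list);
 *	rxq->queue[rxq->write] = rxb;
 *	...
 *	list_add_tail(&rxb->list, &rxq->rx_used);
 */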
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}
/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 * @n_bd -- total number of entries in queue (must be power of 2)
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}
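
/*
 * Worked example (editor's illustration): because n_bd is a power of 2,
 * the bitwise AND with (n_bd - 1) performs the wrap in both directions:
 *
 *	iwl_queue_inc_wrap(255, 256) == 0
 *	iwl_queue_dec_wrap(0, 256)   == 255
 */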
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (@entries in struct iwl_txq) only have 32
 * entries, while the HW buffers (@tfds in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 |      ...     | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid on the HW queue (see the worked example
 * after struct iwl_queue below).
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;         /* first empty entry (index) host_w */
	int read_ptr;          /* last used entry (index) host_r */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;          /* safe queue window */
	u32 id;
	int low_mark;          /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};
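
/*
 * Worked example (editor's illustration): for the command queue, n_window is
 * TFD_CMD_SLOTS (32) while the hardware ring still holds 256 TFDs.  A HW
 * index of 200 therefore maps to SW entry
 *
 *	get_cmd_index(q, 200) == 200 & (32 - 1) == 8
 *
 * i.e. the 32-entry software array is a window repeated over the 256-entry
 * hardware ring.
 */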
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *copy_cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	u8 need_update;
	u8 active;
};
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @status: transport specific status flags
 * @cmd_queue: command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 * @reg_lock: protect hw register access
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	unsigned long status;
	u8 cmd_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	bool rx_buf_size_8k;
	bool bc_table_dword;
	u32 rx_page_order;

	const char **command_names;

	/* queue watchdog */
	unsigned long wd_timeout;

	/* protect hw register */
	spinlock_t reg_lock;
};
/**
 * enum iwl_pcie_status: status of the PCIe transport
 * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 * @STATUS_FW_ERROR: the fw is in error state
 */
enum iwl_pcie_status {
	STATUS_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
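
/*
 * Usage sketch (editor's illustration): the macro and the helper above are
 * inverses of each other, so for any transport allocated by this layer:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */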
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);
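
/*
 * Lifetime sketch (editor's illustration, error handling elided; the exact
 * failure convention of iwl_trans_pcie_alloc() is an assumption here):
 *
 *	struct iwl_trans *trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *	if (!trans)
 *		return -ENOMEM;
 *	...
 *	iwl_trans_pcie_free(trans);
 */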
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
	}
}
static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
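
/*
 * Example (editor's illustration): with read_ptr = 250 and write_ptr = 5
 * the queue has wrapped, so indices 250..255 and 0..4 are in use:
 *
 *	iwl_queue_used(q, 253) == true
 *	iwl_queue_used(q, 10)  == false
 */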
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
#endif /* __iwl_trans_int_pcie_h__ */