Commit | Line | Data |
---|---|---|
ab697a9f EG |
1 | /****************************************************************************** |
2 | * | |
4e318262 | 3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. |
ab697a9f EG |
4 | * |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
25 | * Intel Linux Wireless <ilw@linux.intel.com> | |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
27 | * | |
28 | *****************************************************************************/ | |
29 | #ifndef __iwl_trans_int_pcie_h__ | |
30 | #define __iwl_trans_int_pcie_h__ | |
31 | ||
a72b8b08 EG |
32 | #include <linux/spinlock.h> |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/skbuff.h> | |
13df1aab | 35 | #include <linux/wait.h> |
522376d2 | 36 | #include <linux/pci.h> |
a72b8b08 | 37 | |
dda61a44 | 38 | #include "iwl-fh.h" |
a72b8b08 EG |
39 | #include "iwl-csr.h" |
40 | #include "iwl-shared.h" | |
41 | #include "iwl-trans.h" | |
42 | #include "iwl-debug.h" | |
43 | #include "iwl-io.h" | |
02e38358 | 44 | #include "iwl-op-mode.h" |
a72b8b08 EG |
45 | |
46 | struct iwl_tx_queue; | |
47 | struct iwl_queue; | |
48 | struct iwl_host_cmd; | |
dda61a44 | 49 | |
ab697a9f EG |
50 | /*This file includes the declaration that are internal to the |
51 | * trans_pcie layer */ | |
52 | ||
/**
 * struct iwl_rx_mem_buffer - driver bookkeeping for one RX buffer
 * @page_dma: bus address of the page as mapped for device DMA
 * @page: host page backing this RX buffer
 * @list: link into the owning queue's rx_free or rx_used list
 *	(see struct iwl_rx_queue below)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};
58 | ||
/**
 * struct isr_statistics - interrupt statistics
 *
 * One counter per interrupt cause; the names mirror the interrupt
 * cause bits handled by the ISR path.
 * @hw: hardware error interrupts
 * @sw: software (uCode) error interrupts
 * @err_code: NOTE(review): by its name this holds the last reported
 *	error code rather than a count — confirm against the ISR code
 * @sch: scheduler interrupts
 * @alive: firmware "alive" notifications
 * @rfkill: RF-kill interrupts
 * @ctkill: critical-temperature (CT-kill) interrupts
 * @wakeup: wakeup interrupts
 * @rx: RX interrupts
 * @tx: TX interrupts
 * @unhandled: interrupts whose cause was not recognized
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
76 | ||
/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: backing storage for every RX buffer this queue can own
 *	(RX_QUEUE_SIZE in-flight plus RX_FREE_BUFFERS spares)
 * @queue: per-descriptor-slot pointers into @pool (NOTE(review): presumably
 *	maps each hardware RBD slot to its buffer — confirm in the RX code)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: NOTE(review): looks like the @write value last pushed to
 *	the device (typically aligned down) — confirm where it is written
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects this queue's state (exact scope not visible here)
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
112 | ||
/**
 * struct iwl_dma_ptr - DMA buffer descriptor
 * @dma: bus (DMA) address of the buffer
 * @addr: CPU (virtual) address of the same buffer
 * @size: size of the buffer in bytes
 *
 * Keeps the two addresses of one allocation together so they can be
 * passed around and freed as a unit.
 */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
118 | ||
/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 *
 * Because @n_bd is a power of two, masking with (n_bd - 1) implements
 * the modulo cheaply.
 */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) & (n_bd - 1);
}
128 | ||
/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 * @n_bd: total number of entries in queue (must be power of 2)
 *
 * Decrementing below zero wraps to n_bd - 1 via the power-of-two mask.
 */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return (index - 1) & (n_bd - 1);
}
138 | ||
/**
 * struct iwl_cmd_meta - per-command metadata kept by the transport
 * @source: the originating host command; only set for SYNC commands,
 *	and only if the reply skb is wanted
 * @flags: command flags (semantics defined by the command layer)
 * @mapping / @len: DMA unmap bookkeeping for the command's buffer
 *	(DEFINE_DMA_UNMAP_* compile to nothing on platforms that do not
 *	need unmap information)
 */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	u32 flags;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
148 | ||
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlayed over the HW queue.
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;       /* 1-st empty entry (index) host_w*/
	int read_ptr;         /* last used entry (index) host_r*/
	/* use for monitoring and recovering the stuck queue
	 * NOTE(review): this comment seems to describe @time_stamp in
	 * struct iwl_tx_queue rather than the field below — confirm */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space less than this */
};
183 | ||
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @skbs: array of per-TFD socket buffer pointers
 * @lock: queue lock
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @active: nonzero while the queue is active (NOTE(review): exact
 *	life-cycle not visible in this header — confirm in the TX code)
 *
 * NOTE(review): earlier revisions of this comment documented @bd,
 * @dma_addr_cmd and @txb, which no longer exist in the struct; the
 * member list above matches the current fields.
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct sk_buff **skbs;
	spinlock_t lock;
	unsigned long time_stamp;
	u8 need_update;
	u8 active;
};
213 | ||
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @ict_tbl: CPU pointer to the ICT (interrupt) table
 * @ict_tbl_dma: bus address of the ICT table
 * @ict_index: current read position within the ICT table
 * @inta: interrupt cause bits (NOTE(review): presumably accumulated by the
 *	ISR for the tasklet — confirm in the ISR code)
 * @use_ict: true when the ICT mechanism is in use
 * @irq_requested: true when the irq has been requested
 * @irq_tasklet: tasklet running the interrupt bottom half
 * @isr_stats: per-cause interrupt counters (see struct isr_statistics)
 * @irq - the irq number for the device
 * @irq_lock: spinlock (NOTE(review): presumably protects the interrupt/ICT
 *	state above — confirm)
 * @inta_mask: value written to CSR_INT_MASK when enabling interrupts
 *	(see iwl_enable_interrupts() below)
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @txq: array of TX queues
 * @queue_used: bitmap of HW queues in use (see iwl_wake/stop_queue below)
 * @queue_stopped: bitmap of HW queues currently stopped
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @status - transport specific status flags
 * @cmd_queue - command queue number
 * @n_no_reclaim_cmds: number of valid entries in @no_reclaim_cmds
 * @no_reclaim_cmds: command IDs exempt from reclaim (NOTE(review):
 *	semantics inferred from the name — confirm against the users)
 * @setup_q_to_fifo: queue-index to FIFO mapping (presumably applied at
 *	setup time — confirm)
 * @n_q_to_fifo: number of valid entries in @setup_q_to_fifo
 */
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	bool irq_requested;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	unsigned int irq;
	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_tx_queue *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	unsigned long status;
	u8 cmd_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
	u8 n_q_to_fifo;
};
270 | ||
/* Extract the PCIe-specific private data from the generic transport. */
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
273 | ||
/*****************************************************
* RX
******************************************************/
/* work_struct callback that replenishes RX buffers (see rx_replenish) */
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
/* publish the updated RX queue write index */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q);
282 | ||
/*****************************************************
* ICT - interrupt handling via the DMA'd interrupt table
* (see @ict_tbl / @ict_tbl_dma in struct iwl_trans_pcie)
******************************************************/
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
/* top-half interrupt handler used when ICT is enabled */
irqreturn_t iwl_isr_ict(int irq, void *data);
291 | ||
/*****************************************************
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
			      struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt);
void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, bool active);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
				 int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
/* number of free slots remaining in the generic queue */
int iwl_queue_space(const struct iwl_queue *q);
253a634c | 319 | |
/*****************************************************
* Error handling
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			   char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);
327 | ||
/*****************************************************
* Helpers
******************************************************/
/*
 * Mask all device interrupts and ack anything still pending.
 * The software STATUS_INT_ENABLED flag is cleared before the hardware
 * mask is written.
 */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* software view first: interrupts are (being) disabled */
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
345 | ||
/*
 * Re-enable device interrupts: record the enabled state in the software
 * status flags, then unmask the causes stored in @inta_mask.
 */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	/* set the software flag before unmasking in the hardware */
	set_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
354 | ||
/*
 * Enable ONLY the RF-kill interrupt: CSR_INT_MASK is overwritten with
 * just CSR_INT_BIT_RF_KILL, so every other cause is masked.
 * NOTE(review): unlike iwl_enable_interrupts(), this does not set
 * STATUS_INT_ENABLED — confirm that is intentional.
 */
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
360 | ||
e20d4341 | 361 | static inline void iwl_wake_queue(struct iwl_trans *trans, |
bada991b | 362 | struct iwl_tx_queue *txq) |
e20d4341 | 363 | { |
9eae88fa JB |
364 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
365 | ||
366 | if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { | |
367 | IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); | |
368 | iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); | |
81a3de1c | 369 | } |
e20d4341 EG |
370 | } |
371 | ||
372 | static inline void iwl_stop_queue(struct iwl_trans *trans, | |
bada991b | 373 | struct iwl_tx_queue *txq) |
e20d4341 | 374 | { |
9eae88fa | 375 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
8ad71bef | 376 | |
9eae88fa JB |
377 | if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { |
378 | iwl_op_mode_queue_full(trans->op_mode, txq->q.id); | |
379 | IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); | |
380 | } else | |
381 | IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", | |
382 | txq->q.id); | |
8ad71bef EG |
383 | } |
384 | ||
385 | static inline int iwl_queue_used(const struct iwl_queue *q, int i) | |
386 | { | |
387 | return q->write_ptr >= q->read_ptr ? | |
388 | (i >= q->read_ptr && i < q->write_ptr) : | |
389 | !(i < q->read_ptr && i >= q->write_ptr); | |
390 | } | |
391 | ||
392 | static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) | |
393 | { | |
394 | return index & (q->n_window - 1); | |
395 | } | |
396 | ||
ab697a9f | 397 | #endif /* __iwl_trans_int_pcie_h__ */ |