Commit | Line | Data |
---|---|---|
ab697a9f EG |
1 | /****************************************************************************** |
2 | * | |
4e318262 | 3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. |
ab697a9f EG |
4 | * |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
25 | * Intel Linux Wireless <ilw@linux.intel.com> | |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
27 | * | |
28 | *****************************************************************************/ | |
29 | #ifndef __iwl_trans_int_pcie_h__ | |
30 | #define __iwl_trans_int_pcie_h__ | |
31 | ||
a72b8b08 EG |
32 | #include <linux/spinlock.h> |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/skbuff.h> | |
522376d2 | 35 | #include <linux/pci.h> |
a72b8b08 | 36 | |
dda61a44 | 37 | #include "iwl-fh.h" |
a72b8b08 EG |
38 | #include "iwl-csr.h" |
39 | #include "iwl-shared.h" | |
40 | #include "iwl-trans.h" | |
41 | #include "iwl-debug.h" | |
42 | #include "iwl-io.h" | |
02e38358 | 43 | #include "iwl-op-mode.h" |
a72b8b08 EG |
44 | |
45 | struct iwl_tx_queue; | |
46 | struct iwl_queue; | |
47 | struct iwl_host_cmd; | |
dda61a44 | 48 | |
ab697a9f EG |
49 | /*This file includes the declaration that are internal to the |
50 | * trans_pcie layer */ | |
51 | ||
1f7b6172 EG |
/**
 * struct isr_statistics - interrupt statistics
 *
 * One counter per interrupt cause, maintained by the interrupt handling
 * code (not visible in this header). Member semantics are inferred from
 * the names - verify against the ISR before relying on them.
 * @hw: hardware error interrupts
 * @sw: software (uCode) error interrupts
 * @err_code: error code reported with the last SW error - TODO confirm
 * @sch: scheduler interrupts
 * @alive: uCode "alive" notifications
 * @rfkill: RF-kill switch toggles
 * @ctkill: critical temperature (CT-kill) interrupts
 * @wakeup: wakeup interrupts
 * @rx: Rx interrupts
 * @tx: Tx interrupts
 * @unhandled: interrupts that matched no known cause
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
69 | ||
5a878bf6 EG |
/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: backing storage for the Rx buffers themselves
 *	(RX_QUEUE_SIZE + RX_FREE_BUFFERS entries)
 * @queue: per-slot buffer pointers, one per descriptor
 *	(presumably pointing into @pool - confirm at the refill code)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: NOTE(review): looks like the write index last handed to
 *	the HW (updated less often than @write) - confirm in the Rx code
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects this structure - exact scope not visible in this header
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
105 | ||
a72b8b08 EG |
/**
 * struct iwl_dma_ptr - a DMA-capable buffer and its bus address
 * @dma: bus (DMA) address of the buffer
 * @addr: CPU (virtual) address of the buffer
 * @size: size of the buffer in bytes
 */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};
111 | ||
e13c0c59 EG |
/*
 * This queue number is required for proper operation
 * because the ucode will stop/start the scheduler as
 * required.
 *
 * (HW Tx queue used for IPAN multicast - see the per-context
 * mcast_queue array in struct iwl_trans_pcie.)
 */
#define IWL_IPAN_MCAST_QUEUE		8
118 | ||
522376d2 EG |
/*
 * struct iwl_cmd_meta - driver bookkeeping kept per host command slot
 *
 * @source points back to the originating host command so the reply can be
 * delivered; @flags presumably mirrors the command's flags - TODO confirm.
 * The DMA_UNMAP fields record the mapping so it can be unmapped on
 * completion.
 */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	u32 flags;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
128 | ||
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlayed over the HW queue.
 */
struct iwl_queue {
	int n_bd;	       /* number of BDs in this queue */
	int write_ptr;	       /* 1-st empty entry (index) host_w*/
	int read_ptr;	       /* last used entry (index) host_r*/
	/* use for monitoring and recovering the stuck queue
	 * NOTE(review): this comment appears to predate a refactor; the
	 * stuck-queue timestamp now lives in iwl_tx_queue::time_stamp */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;		       /* queue id - semantics not visible here,
				* confirm at the usage sites */
	int low_mark;	       /* low watermark, resume queue if free
				* space more than this */
	int high_mark;	       /* high watermark, stop queue if free
				* space less than this */
};
163 | ||
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs (HW transmit frame descriptors)
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @skbs: array of per-TFD socket buffer pointers
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: presumably nonzero while the queue is in use - TODO confirm
 * @swq_id: packed software queue id, see iwl_set_swq_id()
 * @sta_id: valid if sched_retry is set
 * @tid: valid if sched_retry is set
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * NOTE(review): the previous version of this comment documented @bd,
 * @dma_addr_cmd and @txb, none of which exist in the struct any more.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct sk_buff **skbs;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;

	u16 sta_id;
	u16 tid;
};
199 | ||
e6bb4c9c EG |
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @ict_tbl: virtual address of the ICT (interrupt cause table)
 * @ict_tbl_dma: bus address of the ICT
 * @ict_index: current read index into the ICT
 * @inta: accumulated interrupt cause bits
 * @use_ict: presumably true while the ICT mechanism is active - confirm
 * @irq_requested: true when the irq has been requested
 * @irq_tasklet: tasklet for interrupt handling (see iwl_irq_tasklet)
 * @isr_stats: per-cause interrupt counters (struct isr_statistics)
 * @irq_lock: NOTE(review): presumably protects the interrupt/ICT state -
 *	scope not visible in this header, confirm at usage sites
 * @inta_mask: mask written to CSR_INT_MASK when interrupts are enabled
 *	(see iwl_enable_interrupts)
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @ac_to_fifo: to what fifo is a specifc AC mapped ?
 * @ac_to_queue: to what tx queue is a specifc AC mapped ?
 * @mcast_queue: per-RXON-context multicast queue number - TODO confirm
 * @agg_txq: per (station, tid) aggregation tx queue number - TODO confirm
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: what queue is active
 * queue_stopped: tracks what queue is stopped
 * queue_stop_count: tracks what SW queue is stopped
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 */
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	bool irq_requested;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];
	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];

	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	atomic_t queue_stop_count[4];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;
};
255 | ||
5a878bf6 EG |
/* cast the transport-private area of a struct iwl_trans to the PCIe data */
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
258 | ||
253a634c EG |
/*****************************************************
* RX
******************************************************/
/* implementations live in the PCIe transport .c files */
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q);
267 | ||
1a361cd8 EG |
/*****************************************************
* ICT - interrupt cause table handling
******************************************************/
void iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
/* ISR entry point used when the ICT mechanism is enabled */
irqreturn_t iwl_isr_ict(int irq, void *data);
276 | ||
253a634c EG |
/*****************************************************
* TX / HCMD
******************************************************/
/* implementations live in the PCIe transport .c files */
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
			      struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
			 struct iwl_rx_mem_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry);
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx,
				 int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
253a634c | 307 | |
7ff94706 EG |
/*****************************************************
* Error handling - dumping device state for debug
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			   char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);
315 | ||
8ad71bef EG |
316 | /***************************************************** |
317 | * Helpers | |
318 | ******************************************************/ | |
0c325769 EG |
/*
 * iwl_disable_interrupts - mask and acknowledge all interrupts
 *
 * Clears the STATUS_INT_ENABLED bit, masks every interrupt source in
 * CSR_INT_MASK and then acks anything already pending in CSR_INT and
 * CSR_FH_INT_STATUS. The write order (mask first, then ack) matters.
 */
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
332 | ||
/*
 * iwl_enable_interrupts - re-enable interrupt delivery
 *
 * Sets STATUS_INT_ENABLED and restores the saved mask
 * (&iwl_trans_pcie.inta_mask) into CSR_INT_MASK.
 */
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
342 | ||
e20d4341 EG |
/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
/* pack the AC (2 bits) and HW queue id (5 bits) into txq->swq_id */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
361 | ||
1daf04b8 EG |
/* extract the AC (0-3) from the low two bits of the software queue id */
static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
{
	return txq->swq_id & 0x3;
}
366 | ||
e20d4341 | 367 | static inline void iwl_wake_queue(struct iwl_trans *trans, |
81a3de1c | 368 | struct iwl_tx_queue *txq, const char *msg) |
e20d4341 EG |
369 | { |
370 | u8 queue = txq->swq_id; | |
371 | u8 ac = queue & 3; | |
372 | u8 hwq = (queue >> 2) & 0x1f; | |
8ad71bef EG |
373 | struct iwl_trans_pcie *trans_pcie = |
374 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
e20d4341 | 375 | |
81a3de1c EG |
376 | if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) { |
377 | if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) { | |
02e38358 | 378 | iwl_op_mode_queue_not_full(trans->op_mode, ac); |
81a3de1c EG |
379 | IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s", |
380 | hwq, ac, msg); | |
381 | } else { | |
382 | IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d" | |
383 | " stop count %d. %s", | |
384 | hwq, ac, atomic_read(&trans_pcie-> | |
385 | queue_stop_count[ac]), msg); | |
386 | } | |
387 | } | |
e20d4341 EG |
388 | } |
389 | ||
390 | static inline void iwl_stop_queue(struct iwl_trans *trans, | |
81a3de1c | 391 | struct iwl_tx_queue *txq, const char *msg) |
e20d4341 EG |
392 | { |
393 | u8 queue = txq->swq_id; | |
394 | u8 ac = queue & 3; | |
395 | u8 hwq = (queue >> 2) & 0x1f; | |
8ad71bef EG |
396 | struct iwl_trans_pcie *trans_pcie = |
397 | IWL_TRANS_GET_PCIE_TRANS(trans); | |
e20d4341 | 398 | |
81a3de1c EG |
399 | if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) { |
400 | if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) { | |
02e38358 | 401 | iwl_op_mode_queue_full(trans->op_mode, ac); |
81a3de1c EG |
402 | IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d" |
403 | " stop count %d. %s", | |
404 | hwq, ac, atomic_read(&trans_pcie-> | |
405 | queue_stop_count[ac]), msg); | |
406 | } else { | |
407 | IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d" | |
408 | " stop count %d. %s", | |
409 | hwq, ac, atomic_read(&trans_pcie-> | |
410 | queue_stop_count[ac]), msg); | |
411 | } | |
412 | } else { | |
413 | IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s", | |
414 | hwq, msg); | |
415 | } | |
e20d4341 EG |
416 | } |
417 | ||
/*
 * This transport layer manages queue state via iwl_stop_queue() /
 * iwl_wake_queue(); poison the mac80211 helpers so nobody in this layer
 * can call them directly by mistake.
 */
#ifdef ieee80211_stop_queue
#undef ieee80211_stop_queue
#endif

#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue

#ifdef ieee80211_wake_queue
#undef ieee80211_wake_queue
#endif

#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
429 | ||
8ad71bef EG |
/* mark txq_id as active in the transport's active-queue bitmask */
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
					int txq_id)
{
	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}
435 | ||
/* clear txq_id from the transport's active-queue bitmask */
static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
					  int txq_id)
{
	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}
441 | ||
442 | static inline int iwl_queue_used(const struct iwl_queue *q, int i) | |
443 | { | |
444 | return q->write_ptr >= q->read_ptr ? | |
445 | (i >= q->read_ptr && i < q->write_ptr) : | |
446 | !(i < q->read_ptr && i >= q->write_ptr); | |
447 | } | |
448 | ||
/*
 * Map a HW descriptor index onto the (smaller) SW command window.
 * Valid only if n_window is a power of two - NOTE(review): not enforced
 * here, confirm at queue init.
 */
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
453 | ||
7a10e3e4 EG |
/* TX FIFO numbers per AC; the *_IPAN variants are for the PAN context */
#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN	4
#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN	5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX		5
#define IWL_TX_FIFO_UNUSED	-1

/* AUX (TX during scan dwell) queue */
#define IWL_AUX_QUEUE		10
468 | ||
ab697a9f | 469 | #endif /* __iwl_trans_int_pcie_h__ */ |