/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-shared.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"

struct iwl_tx_queue;
struct iwl_queue;
struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct isr_statistics - interrupt statistics
 * @hw: HW error interrupt count
 * @sw: uCode (SW) error interrupt count
 * @err_code: last error code reported with a SW error
 * @sch: scheduler fired interrupt count
 * @alive: alive notification interrupt count
 * @rfkill: RF kill toggle interrupt count
 * @ctkill: CT kill (critical temperature) interrupt count
 * @wakeup: wakeup interrupt count
 * @rx: Rx interrupt count
 * @tx: Tx interrupt count
 * @unhandled: count of interrupts with no recognized cause
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: pool of iwl_rx_mem_buffers backing this queue
 * @queue: pointers into @pool for the buffers currently attached to RBDs
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last value of @write actually handed to the device
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the members of this structure
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/*
 * This queue number is required for proper operation
 * because the ucode will stop/start the scheduler as
 * required.
 */
#define IWL_IPAN_MCAST_QUEUE		8

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	u32 flags;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between n_bd and n_window: the hardware
 * always assumes 256 descriptors, so n_bd is always 256 (unless
 * there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @skbs in struct
 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
 * in the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int n_bd;              /* number of BDs in this queue */
	int write_ptr;         /* first empty entry (index) host_w */
	int read_ptr;          /* last used entry (index) host_r */
	/* used for monitoring and recovering a stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BD's */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space is more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space is less than this */
};

/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @skbs: array of per-TFD socket buffer pointers
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: indicates the queue is in use
 * @swq_id: software queue id (AC and HW queue id packed together)
 * @sta_id: valid if sched_retry is set
 * @tid: valid if sched_retry is set
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct sk_buff **skbs;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;

	u16 sta_id;
	u16 tid;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @ac_to_fifo: to what fifo is a specific AC mapped?
 * @ac_to_queue: to what tx queue is a specific AC mapped?
 * @mcast_queue: the multicast queue for each context
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: what queue is active
 * @queue_stopped: tracks which HW queues are stopped
 * @queue_stop_count: per-AC count of stopped SW queues
 */
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	u32 inta;
	bool use_ict;
	struct tasklet_struct irq_tasklet;
	struct isr_statistics isr_stats;

	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];
	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];

	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	atomic_t queue_stop_count[4];
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

/*****************************************************
* RX
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q);

/*****************************************************
* ICT
******************************************************/
int iwl_reset_ict(struct iwl_trans *trans);
void iwl_disable_ict(struct iwl_trans *trans);
int iwl_alloc_isr_ict(struct iwl_trans *trans);
void iwl_free_isr_ict(struct iwl_trans *trans);
irqreturn_t iwl_isr_ict(int irq, void *data);

/*****************************************************
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
			      struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len, u8 reset);
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
			 struct iwl_rx_mem_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
				       struct iwl_tx_queue *txq,
				       u16 byte_cnt);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, int scd_retry);
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx,
				 int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			 int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);

/*****************************************************
* Error handling
******************************************************/
int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			   char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(bus(trans), CSR_INT, 0xffffffff);
	iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
	iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
}

/*
 * we have 8 bits used like this:
 *
 * 7 6 5 4 3 2 1 0
 * | | | | | | | |
 * | | | | | | +-+-------- AC queue (0-3)
 * | | | | | |
 * | +-+-+-+-+------------ HW queue ID
 * |
 * +---------------------- unused
 */
static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}

static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
{
	return txq->swq_id & 0x3;
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq, const char *msg)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
			iwl_wake_sw_queue(priv(trans), ac);
			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
					    hwq, ac, msg);
		} else {
			IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
					    " stop count %d. %s",
					    hwq, ac, atomic_read(&trans_pcie->
					    queue_stop_count[ac]), msg);
		}
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_tx_queue *txq, const char *msg)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
			iwl_stop_sw_queue(priv(trans), ac);
			IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
					    " stop count %d. %s",
					    hwq, ac, atomic_read(&trans_pcie->
					    queue_stop_count[ac]), msg);
		} else {
			IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
					    " stop count %d. %s",
					    hwq, ac, atomic_read(&trans_pcie->
					    queue_stop_count[ac]), msg);
		}
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d, but it is already"
				    " stopped. %s", hwq, msg);
	}
}

#ifdef ieee80211_stop_queue
#undef ieee80211_stop_queue
#endif

#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue

#ifdef ieee80211_wake_queue
#undef ieee80211_wake_queue
#endif

#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue

static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
					int txq_id)
{
	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
					  int txq_id)
{
	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN	4
#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN	5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX		5
#define IWL_TX_FIFO_UNUSED	-1

/* AUX (TX during scan dwell) queue */
#define IWL_AUX_QUEUE		10

#endif /* __iwl_trans_int_pcie_h__ */