/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: initial pool of iwl_rx_mem_buffer for the queue
 * @queue: RX buffers currently handed to the hardware, indexed by RB index
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: value of @write last written to the HW register,
 *	rounded down to a multiple of 8
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue data
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}

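/*
 * Usage sketch (illustrative, not part of the driver API): the wrap helpers
 * rely on TFD_QUEUE_SIZE_MAX being a power of two, so the bitwise AND acts
 * as a cheap modulo.  With a 256-entry queue:
 *
 *	iwl_queue_inc_wrap(255) == 0
 *	iwl_queue_dec_wrap(0)   == 255
 */
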
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there are HW changes in the future).  For the normal TX queues, n_window,
 * which is the size of the software queue data, is also 256; however, for the
 * command queue, n_window is only 32 since we don't need so many commands
 * pending.  Since the HW still uses 256 BDs for DMA, TFD_QUEUE_SIZE_MAX stays
 * 256.  As a result, the software buffers (@entries in struct iwl_txq) only
 * have 32 entries, while the HW buffers (@tfds in the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7.  This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;       /* first empty entry (index), host write pointer */
	int read_ptr;        /* last used entry (index), host read pointer */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr; /* physical addr for BD's */
	int n_window;        /* safe queue window */
	u32 id;
	int low_mark;        /* low watermark, resume queue if free
			      * space more than this */
	int high_mark;       /* high watermark, stop queue if free
			      * space less than this */
};

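/*
 * Example of the SW window described above (illustrative only): with
 * n_window == 32, as on the command queue, HW index 37 maps to SW entry
 * 37 & (32 - 1) == 5; see get_cmd_index() below.
 */
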
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some
 * data into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch
 * buffer.  Since the scratch is 4 bytes at offset 12, it's 16 now.  If we
 * make it bigger, allocations will be bigger and copies slower, so that's
 * probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

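/*
 * Layout sketch (an assumption for illustration; these members pack with no
 * padding): hdr (4 bytes) + buf (8 bytes) + scratch (4 bytes) gives 16 bytes,
 * matching IWL_HCMD_SCRATCHBUF_SIZE above, so the scratch word sits at
 * offset 12 of the first TB as described in the comment for that define.
 */
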
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	unsigned long wd_timeout;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

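/*
 * For example (illustrative arithmetic, assuming the 16-byte layout noted
 * above): the scratch buffer for queue entry 5 lives at
 * txq->scratchbufs_dma + 5 * 16 == txq->scratchbufs_dma + 80.
 */
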
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	bool rx_buf_size_8k;
	bool bc_table_dword;
	bool scd_set_active;
	u32 rx_page_order;

	const char *const *command_names;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

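/*
 * Usage sketch (illustrative only): the macro and the helper above are
 * inverses of each other, e.g.
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */
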
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

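/*
 * Example (illustrative only, based on the shift above): hi_n_len keeps the
 * TB length in its upper 12 bits and the high address bits in the low
 * nibble, so a value of cpu_to_le16(0x0142) decodes to a 20-byte (0x014)
 * transfer buffer.
 */
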
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

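/*
 * Example of the wrap-around case handled above (illustrative only): with
 * read_ptr == 250 and write_ptr == 4 on a 256-entry queue, indices
 * 250..255 and 0..3 are considered used.
 */
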
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

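/*
 * Usage sketch (illustrative only): a read-modify-write that sets a single
 * CSR bit while leaving the rest of the register untouched, e.g.
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 */
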
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#endif /* __iwl_trans_int_pcie_h__ */