/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 * @list: list entry for chaining the buffer on the rx_free/rx_used lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for reuse
 * @write_actual: last write pointer actually written to the device
 * @queue_size: size of this queue
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists and indices
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}

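/*
 * Illustrative example (not part of the driver source): since
 * TFD_QUEUE_SIZE_MAX is 256, a power of two, the mask above behaves
 * like a modulo:
 *	iwl_queue_inc_wrap(255) == 0
 *	iwl_queue_dec_wrap(0) == 255
 */
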
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;	/* 1st empty entry (index) host_w */
	int read_ptr;	/* last used entry (index) host_r */
	/* used for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;	/* safe queue window */
	u32 id;
	int low_mark;	/* low watermark, resume queue if free
			 * space more than this */
	int high_mark;	/* high watermark, stop queue if free
			 * space less than this */
};

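/*
 * Worked example (illustrative only): on the command queue, where
 * n_window == 32, HW index 100 falls in window N == 3 and maps to
 * SW entry 100 & (32 - 1) == 4; see get_cmd_index() below.
 */
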
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

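/*
 * A minimal sanity check one could add (a sketch, assuming struct
 * iwl_cmd_header is 4 bytes): 4 (hdr) + 8 (buf) + 4 (scratch) == 16,
 * so the scratch word sits at offset 12, matching the comment above
 * IWL_HCMD_SCRATCHBUF_SIZE:
 *
 *	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
 *		     IWL_HCMD_SCRATCHBUF_SIZE);
 */
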
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: true while the queue is blocked from transmitting
 * @overflow_q: overflow queue for frames that could not be sent while
 *	the queue was full
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: true while the NIC is kept awake for a host command
 *	in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @allocated_vector: the number of interrupt vectors allocated by the OS
 * @default_irq_num: default irq for non rx interrupt
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u32 allocated_vector;
	u32 default_irq_num;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

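/*
 * Note (added for clarity): these two helpers are inverses of each other --
 * iwl_trans_pcie_get_trans(IWL_TRANS_GET_PCIE_TRANS(trans)) == trans,
 * since the PCIe-specific data lives in the trans_specific tail of
 * struct iwl_trans, as the container_of() above implies.
 */
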
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 * Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

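/* The TB length lives in the upper 12 bits of hi_n_len; the low 4 bits
 * hold bits [35:32] of the DMA address (see struct iwl_tfd_tb in iwl-fh.h).
 */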
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit
		 * is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

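/*
 * The two helpers above unmask a single MSI-X cause and cache the new
 * mask; iwl_enable_fw_load_int() and iwl_enable_rfkill_int() below show
 * typical usage.
 */
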
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

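/* true iff HW index i lies in the currently used range
 * [read_ptr, write_ptr), taking wrap-around into account
 */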
static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

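/*
 * Usage sketch (illustrative only): assert the MAC access request bit in
 * CSR_GP_CNTRL via a read-modify-write, without touching its other bits:
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 */
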
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

#endif /* __iwl_trans_int_pcie_h__ */