/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
#include "iwl-commands.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
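/*
 * Worked example of the two masks above (queue numbers are illustrative,
 * not taken from this file): IWL_MASK(0, 7) expands to
 * (1 << 7) | ((1 << 7) - (1 << 0)) = 0x80 | 0x7f = 0xff, i.e. bits 0..7 set.
 * SCD_QUEUECHAIN_SEL_ALL() builds the "all queues" mask the same way and
 * then clears the command-queue bit, so with 20 queues and cmd_queue == 4
 * it would evaluate to ((1 << 20) - 1) & ~(1 << 4) = 0xfffef.
 */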
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/*Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
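/*
 * The RBD ring base is written as bd_dma >> 8 and the status block as
 * rb_stts_dma >> 4, i.e. the device takes these addresses in 256-byte and
 * 16-byte units respectively; coherent DMA allocations are at least that
 * aligned, so no extra alignment handling is needed here.  (Inferred from
 * the shifted register writes above, not stated elsewhere in this file.)
 */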
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/*if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
			& (TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_tx_queue_entry),
			       GFP_KERNEL);
	if (!txq->entries)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	if (txq->entries && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
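/*
 * Illustrative arithmetic (queue size assumed to be the usual 256, as the
 * reclaim path below notes): the BUILD_BUG_ON above passes because
 * 256 & 255 == 0, and hardware pointers can then be reduced with a simple
 * mask -- e.g. index 256 & (256 - 1) == 0 -- which is exactly the
 * "& (TFD_QUEUE_SIZE_MAX - 1)" used by the stuck-queue timer above.
 */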
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to bound by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->entries[i].cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
#define PCI_CFG_RETRY_TIMEOUT	0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int pos;
	u16 pci_lnk_ctl;
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
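/*
 * Example (hypothetical register value): if the PCIe link control word reads
 * lctl = 0x0043, then (lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == 0x02, so the
 * first branch runs and L0S is disabled, and pm_support becomes
 * !(0x0043 & 0x01) == false; a value with neither ASPM bit set would instead
 * take the else branch and leave pm_support true.
 */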
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int i;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].p_addr)
			continue;

		ret = iwl_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under the irq lock and with MAC access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset conext data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				trans->cfg->base_params->num_of_queues);
	     a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   ((SCD_WIN_SIZE <<
				     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				   ((SCD_FRAME_LIMIT <<
				     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, trans->cfg->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
/*
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id, ret;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				      FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				      1000);
		if (ret < 0)
			IWL_ERR(trans, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_read_direct32(trans,
						  FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already stopped.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->entries[q->write_ptr].skb = skb;
	txq->entries[q->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	bool hw_rfkill;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;

	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}
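/*
 * Example of the ssn -> tfd_num reduction above (using the usual queue size
 * of 256 noted in the comment): ssn = 300 gives tfd_num = 300 & 0xff = 44,
 * so a sequence number larger than the ring simply wraps onto the
 * 256-entry TFD ring.
 */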
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;

	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;

	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}
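/*
 * Illustration (assuming the common 4 KiB PAGE_SIZE): get_order(4 * 1024)
 * is 0 (a single page per Rx buffer) and get_order(8 * 1024) is 1 (an
 * order-1, two-page allocation), which is why rx_page_order rather than a
 * byte count is what the Rx buffer allocation and dma_unmap_page() paths
 * use elsewhere in this file.
 */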
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested == true) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	iwl_enable_interrupts(trans);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 "  %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	/* same registers as handled by get_csr_string() above */
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
1770 /* create and remove of files */
1771 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
1772 if (!debugfs_create_file(#name, mode, parent, trans, \
1773 &iwl_dbgfs_##name##_ops)) \
1777 /* file operation */
1778 #define DEBUGFS_READ_FUNC(name) \
1779 static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
1780 char __user *user_buf, \
1781 size_t count, loff_t *ppos);
1783 #define DEBUGFS_WRITE_FUNC(name) \
1784 static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
1785 const char __user *user_buf, \
1786 size_t count, loff_t *ppos);
1789 #define DEBUGFS_READ_FILE_OPS(name) \
1790 DEBUGFS_READ_FUNC(name); \
1791 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1792 .read = iwl_dbgfs_##name##_read, \
1793 .open = simple_open, \
1794 .llseek = generic_file_llseek, \
1797 #define DEBUGFS_WRITE_FILE_OPS(name) \
1798 DEBUGFS_WRITE_FUNC(name); \
1799 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1800 .write = iwl_dbgfs_##name##_write, \
1801 .open = simple_open, \
1802 .llseek = generic_file_llseek, \
1805 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
1806 DEBUGFS_READ_FUNC(name); \
1807 DEBUGFS_WRITE_FUNC(name); \
1808 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1809 .write = iwl_dbgfs_##name##_write, \
1810 .read = iwl_dbgfs_##name##_read, \
1811 .open = simple_open, \
1812 .llseek = generic_file_llseek, \
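/*
 * For reference, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) expands (roughly)
 * to forward declarations of iwl_dbgfs_interrupt_read()/_write() plus:
 *
 *	static const struct file_operations iwl_dbgfs_interrupt_ops = {
 *		.write = iwl_dbgfs_interrupt_write,
 *		.read = iwl_dbgfs_interrupt_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * which DEBUGFS_ADD_FILE() then registers under the given dentry.
 */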
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos) {
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}
iwl_dbgfs_fh_reg_read(struct file
*file
,
1985 char __user
*user_buf
,
1986 size_t count
, loff_t
*ppos
)
1988 struct iwl_trans
*trans
= file
->private_data
;
1991 ssize_t ret
= -EFAULT
;
1993 ret
= pos
= iwl_dump_fh(trans
, &buf
, true);
1995 ret
= simple_read_from_buffer(user_buf
,
1996 count
, ppos
, buf
, pos
);
2003 static ssize_t
iwl_dbgfs_fw_restart_write(struct file
*file
,
2004 const char __user
*user_buf
,
2005 size_t count
, loff_t
*ppos
)
2007 struct iwl_trans
*trans
= file
->private_data
;
2009 if (!trans
->op_mode
)
2012 iwl_op_mode_nic_error(trans
->op_mode
);
2017 DEBUGFS_READ_WRITE_FILE_OPS(interrupt
);
2018 DEBUGFS_READ_FILE_OPS(fh_reg
);
2019 DEBUGFS_READ_FILE_OPS(rx_queue
);
2020 DEBUGFS_READ_FILE_OPS(tx_queue
);
2021 DEBUGFS_WRITE_FILE_OPS(csr
);
2022 DEBUGFS_WRITE_FILE_OPS(fw_restart
);
/*
 * Create the debugfs files and directories
 *
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);
	spin_lock_init(&trans->reg_lock);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}
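/*
 * hw_id example (the PCI IDs below are made up for illustration): for a
 * device id of 0x0082 and a subsystem id of 0x1321, hw_id becomes
 * (0x0082 << 16) + 0x1321 = 0x00821321 and hw_id_str reads
 * "PCI ID: 0x0082:0x1321".
 */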