/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
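/*
 * Worked example (added, illustrative): IWL_MASK(0, 7) evaluates to
 * (1 << 7) | ((1 << 7) - (1 << 0)) == 0x80 | 0x7f == 0xff, i.e. a mask
 * covering bits lo..hi inclusive (IWL_MASK(2, 5) == 0x20 | 0x1c == 0x3c).
 */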
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
	(((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
	(~(1<<(trans_pcie)->cmd_queue)))
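/*
 * Worked example (added, illustrative; the queue counts are hypothetical):
 * with num_of_queues == 8 and cmd_queue == 4, this evaluates to
 * ((1 << 8) - 1) & ~(1 << 4) == 0xff & ~0x10 == 0xef, i.e. chaining is
 * selected on every queue except the command queue.
 */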
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << trans_pcie->rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
	struct iwl_tx_queue *txq = (void *)data;
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->q.read_ptr == txq->q.write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
		jiffies_to_msecs(trans_pcie->wd_timeout));
	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);
	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
			& (TFD_QUEUE_SIZE_MAX - 1),
		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));

	iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
		    (unsigned long)txq);
	txq->trans_pcie = trans_pcie;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans_pcie->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
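/*
 * Illustrative note (added, not in the original file): the BUILD_BUG_ON
 * above is needed because the wrap helpers advance ring indexes with a
 * bitmask rather than a modulo, roughly:
 *
 *	return ++index & (n_bd - 1);
 *
 * which only equals "% n_bd" when n_bd is a power of two (e.g. with
 * n_bd == 256, index 255 wraps to 0 since 256 & 0xff == 0).
 */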
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans_pcie->cmd_queue)
		dma_dir = DMA_BIDIRECTIONAL;
	else
		dma_dir = DMA_TO_DEVICE;

	spin_lock_bh(&txq->lock);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_bh(&txq->lock);
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}
/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param trans
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(trans, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++) {
		slots_num = (txq_id == trans_pcie->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
#define PCI_CFG_RETRY_TIMEOUT	0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN	0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN	0x02
static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int pos;
	u16 pci_lnk_ctl;
	struct pci_dev *pci_dev = trans_pcie->pci_dev;

	pos = pci_pcie_cap(pci_dev);
	pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
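/*
 * Note (added for clarity): the PCI_CFG_LINK_CTRL_VAL_*_EN bits defined
 * above correspond to the ASPM control field of the standard PCIe Link
 * Control register read here: bit 0 enables L0s entry, bit 1 enables L1
 * entry. iwl_apm_config() below keys off those two bits.
 */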
static void iwl_apm_config(struct iwl_trans *trans)
{
	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	u16 lctl = iwl_pciexp_link_ctrl(trans);

	if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
				PCI_CFG_LINK_CTRL_VAL_L1_EN) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_printk(KERN_INFO, trans->dev,
			   "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (cfg(trans)->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    cfg(trans)->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
static int iwl_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_op_mode_nic_config(trans->op_mode);

#ifndef CONFIG_IWLWIFI_IDI
	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);
#endif

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (cfg(trans)->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}
/*
 * ucode
 */
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	dma_addr_t phy_addr = section->p_addr;
	u32 byte_cnt = section->len;
	u32 dst_addr = section->offset;
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(trans,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);
	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Could not load the [%d] uCode section\n",
			section_num);
		return -ETIMEDOUT;
	}

	return 0;
}
*trans
,
979 const struct fw_img
*image
)
984 for (i
= 0; i
< IWL_UCODE_SECTION_MAX
; i
++) {
985 if (!image
->sec
[i
].p_addr
)
988 ret
= iwl_load_section(trans
, i
, &image
->sec
[i
]);
993 /* Remove all resets to allow NIC to operate */
994 iwl_write32(trans
, CSR_RESET
, 0);
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	if (hw_rfkill) {
		iwl_enable_rfkill_int(trans);
		return -ERFKILL;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_load_given_ucode(trans, fw);
}
/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under the irq lock and with MAC access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->irq_lock);

	iwl_write_prph(trans, SCD_TXFACT, mask);
}
static void iwl_tx_start(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(trans, a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(
				cfg(trans)->base_params->num_of_queues);
	       a += 4)
		iwl_write_targ_mem(trans, a, 0);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
	iwl_write_prph(trans, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, cfg(trans)->base_params->num_of_queues));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);

	/* make sure all queue are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
		int fifo = trans_pcie->setup_q_to_fifo[i];

		set_bit(i, trans_pcie->queue_used);

		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, true);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_reset_ict(trans);
	iwl_tx_start(trans);
}
/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id, ret;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(trans,
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
		if (ret < 0)
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(trans,
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
	     txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
		iwl_trans_rx_stop(trans);
#endif
		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(trans_pcie->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);

	cancel_work_sync(&trans_pcie->rx_replenish);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
	u16 __maybe_unused wifi_seq;

	txq = &trans_pcie->txq[txq_id];
	q = &txq->q;

	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock(&txq->lock);

	/* Set up driver data for this TFD */
	txq->skbs[q->write_ptr] = skb;
	txq->cmd[q->write_ptr] = dev_cmd;

	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
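	/*
	 * Illustrative arithmetic (added note, not in the original file):
	 * (len + 3) & ~3 rounds len up to the next multiple of 4, e.g.
	 * len == 50 gives firstlen == 52, leaving the two pad bytes after
	 * the MAC header that the flag above reports to the device.
	 */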
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	spin_unlock(&txq->lock);
	return 0;
 out_err:
	spin_unlock(&txq->lock);
	return -1;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;
	bool hw_rfkill;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (!trans_pcie->irq_requested) {
		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
			iwl_irq_tasklet, (unsigned long)trans);

		iwl_alloc_isr_ict(trans);

		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
				  DRV_NAME, trans);
		if (err) {
			IWL_ERR(trans, "Error allocating IRQ %d\n",
				trans_pcie->irq);
			goto error;
		}

		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
		trans_pcie->irq_requested = true;
	}

	err = iwl_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d", err);
		goto err_free_irq;
	}

	iwl_apm_init(trans);

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return err;

err_free_irq:
	free_irq(trans_pcie->irq, trans);
error:
	iwl_free_isr_ict(trans);
	tasklet_kill(&trans_pcie->irq_tasklet);
	return err;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
{
	iwl_apm_stop(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/* Even if we stop the HW, we still want the RF kill interrupt */
	iwl_enable_rfkill_int(trans);
}
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;
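	/*
	 * Illustrative example (added note, hypothetical values): with
	 * n_bd == 256, an ssn of 0x12a maps to TFD index
	 * 0x12a & 0xff == 0x2a, folding the scheduler sequence number
	 * back into the ring the same way the write pointer wraps.
	 */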
	spin_lock(&txq->lock);

	if (txq->q.read_ptr != tfd_num) {
		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
				   txq_id, txq->q.read_ptr, tfd_num, ssn);
		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
			iwl_wake_queue(trans, txq);
	}

	spin_unlock(&txq->lock);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;

	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;

	/* at least the command queue must be mapped */
	WARN_ON(!trans_pcie->n_q_to_fifo);

	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
	       trans_pcie->n_q_to_fifo * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
	iwl_trans_pcie_rx_free(trans);
#endif
	if (trans_pcie->irq_requested == true) {
		free_irq(trans_pcie->irq, trans);
		iwl_free_isr_ict(trans);
	}

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	trans->shrd->trans = NULL;

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);

	if (hw_rfkill)
		iwl_enable_rfkill_int(trans);
	else
		iwl_enable_interrupts(trans);

	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(trans, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));
	}
	return 0;
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
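/*
 * Illustrative expansion (added note): DEBUGFS_WRITE_FILE_OPS(csr)
 * declares iwl_dbgfs_csr_write() and emits a file_operations named
 * iwl_dbgfs_csr_ops wired to it; DEBUGFS_ADD_FILE(csr, dir, S_IWUSR)
 * further below then registers that as a write-only "csr" debugfs entry.
 */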
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_used),
				!!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	if (!trans->op_mode)
		return -EAGAIN;

	iwl_op_mode_nic_error(trans->op_mode);

	return count;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_WRITE_FILE_OPS(fw_restart);
/*
 * Create the debugfs files and directories
 *
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

	.send_cmd = iwl_trans_pcie_send_cmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.free = iwl_trans_pcie_free,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
				       struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (WARN_ON(!trans))
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->shrd = shrd;
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_len = 0x%08llx\n",
		   (unsigned long long) pci_resource_len(pdev, 0));
	dev_printk(KERN_INFO, &pdev->dev,
		   "pci_resource_base = %p\n", trans_pcie->hw_base);

	dev_printk(KERN_INFO, &pdev->dev,
		   "HW Revision ID = 0x%X\n", pdev->revision);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err)
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci_enable_msi failed(0X%x)", err);

	trans->dev = &pdev->dev;
	trans_pcie->irq = pdev->irq;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* TODO: Move this away, not needed if not MSI */
	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	}

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return trans;

out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}