/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
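/*
 * Allocate the largest physically-contiguous buffer the system will give
 * us for the firmware monitor: try power-of-two sizes from 2^26 (64 MB)
 * down to 2^11 (2 KB) and keep the first allocation that both succeeds
 * and can be DMA-mapped.
 */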
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size;
	u8 power;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = 26; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
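/*
 * The "shared" (SHR) block is not reachable through the normal CSR
 * window; it is accessed indirectly through the HEEP control/data
 * register pair, with the opcode encoded in the top nibble of the
 * control word (2 = read, 3 = write).
 */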
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	udelay(10);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
/*
 * Driver takes ownership of the secure machine before FW load, to
 * prevent a race with the BT load.
 * W/A for ROM bug. (should be removed in the next Si step)
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}
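/*
 * Same section-walking logic as the 8000-family variant above, but
 * without the per-section FH_UCODE_LOAD_STATUS handshake; only a final
 * LMPM "loading started/completed" summary is written, and only for
 * 8000-family devices.
 */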
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}
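/*
 * Full uCode load for pre-8000 devices: CPU1 sections first, then (for
 * dual-CPU images) the CPU2 header address and CPU2 sections, then the
 * optional firmware monitor, and finally the CPU reset release that
 * actually starts the embedded processor.
 */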
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					      &first_ucode_section);
	if (ret)
		return ret;

	return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		return iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
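/*
 * Tear-down order below matters: interrupts are silenced before the ICT
 * table is retired, DMA is stopped before the APM is powered down, and
 * the RF kill interrupt is re-armed at the end so the op_mode keeps
 * seeing rfkill changes even while the device is stopped.
 */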
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		iwl_trans_pcie_stop_device(trans, true);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
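/*
 * A dummy NAPI poll handler is registered only so the transport can use
 * the NAPI context for RX; it is never expected to actually run, hence
 * the WARN_ON below.
 */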
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/*
	 * Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we're acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
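/*
 * The target-memory reads/writes below rely on the HBUS_TARG_MEM
 * address registers auto-incrementing: program the start address once,
 * then pump dwords through the data register while nic_access is held.
 */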
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
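/*
 * debugfs hooks: when CONFIG_IWLWIFI_DEBUGFS is set, the transport
 * exposes tx_queue/rx_queue/interrupt/csr/fh_reg files under the
 * per-device debugfs directory; otherwise registration is a no-op.
 */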
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}
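/*
 * Periphery register ranges captured in a firmware error dump. Both
 * boundaries of each range are inclusive, so a single-register chunk
 * has .start == .end.
 */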
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};
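
/*
 * Each dump section below is written as a TLV: an iwl_fw_error_dump_data
 * header carrying the type and length, followed by the payload;
 * iwl_fw_error_next_data() advances the cursor past the section.
 */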
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) +
			    num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					   num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));
		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}

#define IWL_CSR_TO_DUMP (0x250)
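
/* Capture the first IWL_CSR_TO_DUMP bytes of CSR space, one u32 at a time. */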
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
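
/*
 * Dump the FH (flow handler) register window. Unlike the CSR dump,
 * this requires NIC access to be held while the registers are read.
 */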
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
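
/*
 * Assemble the complete transport dump: a header, the most recent host
 * commands from the command queue, then the PRPH/CSR/FH register
 * sections and, when available, the firmware monitor. The buffer is
 * sized in a first pass and filled section by section in a second.
 */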
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len;
	u32 monitor_len;
	int i, ptr;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) +
			sizeof(struct iwl_fw_error_dump_prph) +
			num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
	} else {
		monitor_len = 0;
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	/* data is already pointing to the next section */

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(*data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			data->len = cpu_to_le32(trans_pcie->fw_mon_size +
						sizeof(*fw_mon_data));

			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			len += trans_pcie->fw_mon_size;
		} else {
			/* If we are here then the buffer is internal */

			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
			data->len = cpu_to_le32(sizeof(*fw_mon_data) +
						monitor_len);
			len += monitor_len;
		}
	}

	dump_data->len = len;

	return dump_data;
}
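
/* The PCIe implementation of the transport ops used by the op-mode. */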
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};
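
/*
 * Allocate and set up the PCIe transport for a probed device: enable
 * the PCI device, configure DMA masks (36-bit with a 32-bit fallback),
 * map BAR0, enable MSI and hook up the interrupt handlers.
 */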
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans) {
		err = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;
		int ret;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool) {
		err = -ENOMEM;
		goto out_pci_disable_msi;
	}

	if (iwl_pcie_alloc_ict(trans)) {
		err = -ENOMEM;
		goto out_free_cmd_pool;
	}

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return ERR_PTR(err);
}