/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
#include "iwl-fh.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}

static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

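/*
 * Usage sketch (illustrative only, not part of the driver): the two
 * helpers above tunnel accesses to the shared (SHR) APMG block through a
 * single CSR pair.  The control word packs the 16-bit SHR offset into
 * the low half and an opcode into bits 31:28 (2 = read, 3 = write), so
 * a read-modify-write looks like:
 *
 *	u32 gp1 = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
 *	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG,
 *				 gp1 | SHR_APMG_GP1_WF_XTAL_LP_EN);
 *
 * which is exactly the pattern iwl_pcie_apm_lp_xtal_enable() uses below.
 */
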
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the 7260 / 3160 devices are
		 * the only ones that need this, so we only check
		 * host_interrupt_operation_mode even though the oscillator
		 * workaround is not really related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);

	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	msleep(1);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

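/*
 * Note on the sequence above (illustrative summary): loading a chunk
 * programs a single transmit buffer on the FH service channel - pause
 * the channel, set the SRAM destination, point the TFD control
 * registers at the DMA address and byte count, mark the buffer valid,
 * then re-enable the channel and sleep until the "uCode write complete"
 * interrupt wakes ucode_write_waitq.  Callers only ever need the
 * wrapper itself, e.g. per chunk in iwl_pcie_load_section() below:
 *
 *	ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
 *					   copy_size);
 */
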
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

/*
 * Driver takes ownership of the secure machine before FW load to
 * prevent a race with the BT load.
 * W/A for ROM bug. (should be removed in the next Si step)
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 and CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}

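/*
 * Worked example (illustrative): sec_num above starts at 0x1 and
 * evolves as (sec_num << 1) | 0x1 after every section, so the value
 * written to FH_UCODE_LOAD_STATUS is a growing mask of "sections loaded
 * so far": 0x1 after the first section, 0x3 after the second, 0x7 after
 * the third, and so on; for CPU2 the same mask is shifted up by 16 bits
 * (shift_param).  The final 0xFFFF / 0xFFFFFFFF writes tell the ROM
 * that the given CPU's image is complete.
 */
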
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 and CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	mutex_lock(&trans_pcie->mutex);

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->wowlan_d0i3) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (!trans->wowlan_d0i3) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (trans->wowlan_d0i3) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ... rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	synchronize_irq(trans_pcie->pci_dev->irq);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

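/*
 * Illustrative note: periphery (PRPH) registers are reached indirectly,
 * much like the SHR block earlier in this file - the 20-bit PRPH
 * address goes into the low bits of HBUS_TARG_PRPH_{R,W}ADDR together
 * with the control value (3 << 24), and the data then moves through
 * HBUS_TARG_PRPH_{R,W}DAT.  Callers must hold NIC access (see
 * iwl_trans_pcie_grab_nic_access() below) while doing this, e.g.
 * (sketch):
 *
 *	u32 clk = iwl_trans_pcie_read_prph(trans, APMG_CLK_CTRL_REG);
 */
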
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
			       iwl_pcie_dummy_napi_poll, 64);
	}
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking that we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}

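/*
 * Typical caller pattern (sketch, mirroring iwl_trans_pcie_read_mem()
 * below): every burst of direct register/SRAM traffic is bracketed by
 * grab/release so that MAC_ACCESS_REQ keeps the device awake exactly as
 * long as needed:
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
 *		...iwl_read32()/iwl_write32() bursts...
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */
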
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

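/*
 * Usage sketch (illustrative): both accessors above stream dwords
 * through the auto-incrementing HBUS_TARG_MEM_{R,W}DAT window, so
 * reading a block of device SRAM reduces to (base_addr being whatever
 * SRAM offset the caller cares about):
 *
 *	u32 data[4];
 *
 *	if (!iwl_trans_pcie_read_mem(trans, base_addr, data,
 *				     ARRAY_SIZE(data)))
 *		// data[0..3] now hold 16 bytes of device SRAM
 */
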
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

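/*
 * Expansion sketch (illustrative): DEBUGFS_READ_WRITE_FILE_OPS(interrupt)
 * further down expands to roughly
 *
 *	static const struct file_operations iwl_dbgfs_interrupt_ops = {
 *		.write = iwl_dbgfs_interrupt_write,
 *		.read = iwl_dbgfs_interrupt_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * and DEBUGFS_ADD_FILE(interrupt, dir, mode) then registers that ops
 * struct under the given dentry, jumping to the caller's err label on
 * failure.
 */
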
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code:  0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

2106 DEBUGFS_READ_WRITE_FILE_OPS(interrupt
);
2107 DEBUGFS_READ_FILE_OPS(fh_reg
);
2108 DEBUGFS_READ_FILE_OPS(rx_queue
);
2109 DEBUGFS_READ_FILE_OPS(tx_queue
);
2110 DEBUGFS_WRITE_FILE_OPS(csr
);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
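
/*
 * Sum the byte counts of all TBs in a TFD to recover the total length
 * of the command that was written to the queue slot.
 */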
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}
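
/*
 * Periphery (PRPH) register ranges included in a firmware error dump.
 * Each range is inclusive of both its start and end addresses.
 */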
static const struct {
	u32 start, end;
} iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};
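
/*
 * Read every PRPH range in iwl_prph_dump_addr into the error-dump
 * buffer. Returns the number of bytes added, or 0 if NIC access
 * could not be acquired.
 */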
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));

		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}
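
/*
 * Copy the receive buffers (RBs) the device has closed since the last
 * read into the error dump. Each page is unmapped for CPU access,
 * copied, and then remapped so it can later be handed back to the
 * device.
 */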
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);
		/* remap the page for the free benefit */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)
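
/* Dump the first IWL_CSR_TO_DUMP bytes of CSR register space. */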
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
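
/*
 * Dump the FH (flow handler) register range; NIC access is held for
 * the duration of the reads.
 */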
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
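
/*
 * Read the MARBH firmware monitor through the DMARB read pointer: set
 * the control register, read the data register once per dword, then
 * clear the control register again.
 */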
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}
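
/*
 * Dump the firmware monitor. Header values (write pointer, cycle count,
 * base) come from the dest TLV when one exists, otherwise from the
 * default MON_BUFF registers; the data itself comes from the DRAM
 * buffer, from SMEM, or from the MARBH reader, depending on the
 * configured monitor mode.
 */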
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 len = 0;

	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write anything
			 * to the buffer. CPU can take ownership to fetch the
			 * data. The buffer will be handed back to the device
			 * before the firmware will be restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}
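
/*
 * Build the full transport error dump: host commands from the command
 * queue, PRPH/CSR/FH registers, received RBs when the firmware hit an
 * error, and the firmware monitor. The total length is computed first
 * so the whole dump can be allocated with a single vzalloc().
 */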
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			  struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
		dump_data = vzalloc(len);
		if (!dump_data)
			return NULL;

		data = (void *)dump_data->data;
		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
		dump_data->len = len;

		return dump_data;
	}

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
		       num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(
				      trans_pcie->rxq.rb_stts->closed_rb_num))
				      & 0x0FFF;
		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;
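
	return dump_data;
}

/*
 * PCIe implementation of the transport ops; the op_mode layer drives
 * the hardware exclusively through this vtable.
 */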
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};
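
/*
 * Allocate and initialize the PCIe transport: enable the PCI device,
 * set the DMA masks (36-bit, falling back to 32-bit), map BAR0,
 * enable MSI, detect the hardware revision, and hook up the ISR.
 */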
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int ret;

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		ret = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

		ret = iwl_pcie_prepare_card_hw(trans);
		if (ret) {
			IWL_WARN(trans, "Exit HW not ready\n");
			goto out_pci_disable_msi;
		}

		/*
		 * in-order to recognize C step driver should read chip version
		 * id located at the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}

	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	ret = iwl_pcie_alloc_ict(trans);
	if (ret)
		goto out_pci_disable_msi;

	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (ret) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}