/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
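/*
 * Select the NIC's power source: use VAUX when it is requested and the PCI
 * device can assert PME# from D3cold, otherwise fall back to VMAIN.
 */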
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        else
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
#define PCI_CFG_RETRY_TIMEOUT	0x041
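/*
 * Tune ASPM according to what the BIOS/OS configured on the link: if L1 is
 * enabled, L0S is disabled (and vice versa), and pm_support records whether
 * L0S ended up disabled.
 */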
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;

        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
         * Check if BIOS (or OS) enabled L1-ASPM on this device.
         * If so (likely), disable L0S, so device moves directly L0->L1;
         *    costs negligible amount of power savings.
         * If not (unlikely), enable L0S, so there is at least some
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
        if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
                /* L1-ASPM enabled; disable(!) L0S */
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
                dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
        } else {
                /* L1-ASPM disabled; enable(!) L0S */
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
                dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
        }
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
        int ret = 0;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /* Disable L0S exit timer (platform NMI Work/Around) */
        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
                iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                            CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

        /*
         * Disable L0s without affecting L1;
         * don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

        iwl_pcie_apm_config(trans);

        /* Configure analog phase-lock-loop before activating to D0A */
        if (trans->cfg->base_params->pll_cfg_val)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG,
                            trans->cfg->base_params->pll_cfg_val);

        /*
         * Set "initialization complete" bit to move adapter from
         * D0U* --> D0A* (powered-up active) state.
         */
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /*
         * Wait for clock stabilization; once stabilized, access to
         * device-internal resources is supported, e.g. iwl_write_prph()
         * and accesses to uCode SRAM.
         */
        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
        if (ret < 0) {
                IWL_DEBUG_INFO(trans, "Failed to init the card\n");
                goto out;
        }

        if (trans->cfg->host_interrupt_operation_mode) {
                /*
                 * This is a bit of an abuse - this is needed for 7260 / 3160
                 * only, so we key off host_interrupt_operation_mode even
                 * though the workaround is not related to it.
                 *
                 * Enable the oscillator to count wake up time for L1 exit. This
                 * consumes slightly more power (100uA) - but allows to be sure
                 * that we wake up from L1 on time.
                 *
                 * This looks weird: read twice the same register, discard the
                 * value, set a bit, and yet again, read that same register
                 * just to discard the value. But that's the way the hardware
                 * seems to like it.
                 */
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
                iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
        }

        /*
         * Enable DMA clock and wait for it to stabilize.
         *
         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);

                /* Disable L1-Active */
                iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

                /* Clear the interrupt in APMG if the NIC is in RFKILL */
                iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
                               APMG_RTC_INT_STT_RFKILL);
        }

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
        return ret;
}
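/*
 * Stop the device's busmaster DMA activity and wait (up to 100 usec) for the
 * master-disabled bit to assert.
 */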
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
        int ret = 0;

        /* stop device's busmaster DMA activity */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

        ret = iwl_poll_bit(trans, CSR_RESET,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
        if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

        IWL_DEBUG_INFO(trans, "stop master\n");

        return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        /* Reset the entire device */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

        udelay(10);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
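/*
 * Bring the NIC up for operation: run the basic APM init, let the op_mode
 * apply its NIC configuration, and allocate or reset the RX, TX and command
 * queues. Shadow registers are enabled when the config supports them.
 */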
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* nic_init */
        spin_lock(&trans_pcie->irq_lock);
        iwl_pcie_apm_init(trans);
        spin_unlock(&trans_pcie->irq_lock);

        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
                iwl_pcie_set_pwr(trans, false);

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_pcie_rx_init(trans);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_pcie_tx_init(trans))
                return -ENOMEM;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
        }

        return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");

        return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;
        int t = 0;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_pcie_set_hw_ready(trans);
        /* If the card is ready, exit 0 */
        if (ret >= 0)
                return 0;

        /* If HW is not ready, prepare the conditions to check again */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);

        do {
                ret = iwl_pcie_set_hw_ready(trans);
                if (ret >= 0)
                        return 0;

                usleep_range(200, 1000);
                t += 200;
        } while (t < 150000);

        return ret;
}
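/*
 * DMA a single firmware chunk into device SRAM over the FH service channel
 * and wait (up to 5 seconds) for the "uCode write complete" interrupt.
 */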
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
                                        dma_addr_t phy_addr, u32 byte_cnt)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        trans_pcie->ucode_write_complete = false;

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write_direct32(trans,
                           FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
                           dst_addr);

        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                           phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                           (iwl_get_dma_hi_addr(phy_addr)
                                << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                           1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
                           1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
                           FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Failed to load firmware chunk!\n");
                return -ETIMEDOUT;
        }

        return 0;
}
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                                 const struct fw_desc *section)
{
        u8 *v_addr;
        dma_addr_t p_addr;
        u32 offset, chunk_sz = section->len;
        int ret = 0;

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);

        v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
                                    GFP_KERNEL | __GFP_NOWARN);
        if (!v_addr) {
                IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
                chunk_sz = PAGE_SIZE;
                v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
                                            &p_addr, GFP_KERNEL);
                if (!v_addr)
                        return -ENOMEM;
        }

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size;

                copy_size = min_t(u32, chunk_sz, section->len - offset);

                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans,
                                                   section->offset + offset,
                                                   p_addr, copy_size);
                if (ret) {
                        IWL_ERR(trans,
                                "Could not load the [%d] uCode section\n",
                                section_num);
                        break;
                }
        }

        dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
}
static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
                                              const struct fw_img *image,
                                              int cpu)
{
        int shift_param;
        u32 first_idx, last_idx;
        int i, ret = 0;

        if (cpu == 1) {
                shift_param = 0;
                first_idx = 0;
                last_idx = 2;
        } else {
                shift_param = 16;
                first_idx = 3;
                last_idx = 5;
        }

        for (i = first_idx; i <= last_idx; i++) {
                if (!image->sec[i].data)
                        continue;
                if (i == first_idx + 1)
                        /* set CPU to started */
                        iwl_set_bits_prph(trans,
                                          CSR_UCODE_LOAD_STATUS_ADDR,
                                          LMPM_CPU_HDRS_LOADING_COMPLETED
                                          << shift_param);

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        /* image loading complete */
        iwl_set_bits_prph(trans,
                          CSR_UCODE_LOAD_STATUS_ADDR,
                          LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);

        return 0;
}
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                                      const struct fw_img *image,
                                      int cpu)
{
        int shift_param;
        u32 first_idx, last_idx;
        int i, ret = 0;

        if (cpu == 1) {
                shift_param = 0;
                first_idx = 0;
                last_idx = 1;
        } else {
                shift_param = 16;
                first_idx = 2;
                last_idx = 3;
        }

        for (i = first_idx; i <= last_idx; i++) {
                if (!image->sec[i].data)
                        continue;

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                iwl_set_bits_prph(trans,
                                  CSR_UCODE_LOAD_STATUS_ADDR,
                                  (LMPM_CPU_UCODE_LOADING_COMPLETED |
                                   LMPM_CPU_HDRS_LOADING_COMPLETED |
                                   LMPM_CPU_UCODE_LOADING_STARTED) <<
                                        shift_param);

        return 0;
}
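/*
 * Load a complete firmware image: for secured images, program the secure-boot
 * inspector and CPU header addresses first; then load the CPU1 (and, for
 * dual-CPU images, CPU2) sections, release the CPU reset and, for secured
 * images, wait for secure boot verification to succeed.
 */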
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                     const struct fw_img *image)
{
        int ret = 0;

        IWL_DEBUG_FW(trans,
                     "working with %s image\n",
                     image->is_secure ? "Secured" : "Non Secured");
        IWL_DEBUG_FW(trans,
                     "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* configure the ucode to be ready to get the secured image */
        if (image->is_secure) {
                /* set secure boot inspector addresses */
                iwl_write_prph(trans,
                               LMPM_SECURE_INSPECTOR_CODE_ADDR,
                               LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);

                iwl_write_prph(trans,
                               LMPM_SECURE_INSPECTOR_DATA_ADDR,
                               LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);

                /* set CPU1 header address */
                iwl_write_prph(trans,
                               LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
                               LMPM_SECURE_CPU1_HDR_MEM_SPACE);

                /* load to FW the binary Secured sections of CPU1 */
                ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1);
                if (ret)
                        return ret;
        } else {
                /* load to FW the binary Non secured sections of CPU1 */
                ret = iwl_pcie_load_cpu_sections(trans, image, 1);
                if (ret)
                        return ret;
        }

        if (image->is_dual_cpus) {
                /* set CPU2 header address */
                iwl_write_prph(trans,
                               LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
                               LMPM_SECURE_CPU2_HDR_MEM_SPACE);

                /* load to FW the binary sections of CPU2 */
                if (image->is_secure)
                        ret = iwl_pcie_load_cpu_secured_sections(trans,
                                                                 image, 2);
                else
                        ret = iwl_pcie_load_cpu_sections(trans, image, 2);
                if (ret)
                        return ret;
        }

        /* release CPU reset */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
        else
                iwl_write32(trans, CSR_RESET, 0);

        if (image->is_secure) {
                /* wait for image verification to complete */
                ret = iwl_poll_prph_bit(trans,
                                        LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
                                        LMPM_SECURE_BOOT_STATUS_SUCCESS,
                                        LMPM_SECURE_BOOT_STATUS_SUCCESS,
                                        LMPM_SECURE_TIME_OUT);
                if (ret < 0) {
                        IWL_ERR(trans, "Time out on secure boot process\n");
                        return ret;
                }
        }

        return 0;
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
{
        int ret;
        bool hw_rfkill;

        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                return -EIO;
        }

        iwl_enable_rfkill_int(trans);

        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;

        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(trans);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
        iwl_pcie_reset_ict(trans);
        iwl_pcie_tx_start(trans, scd_addr);
}
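/*
 * Full device stop: disable interrupts and the ICT table, stop TX/RX DMA,
 * power the APM down, reset the on-board processor and clear the status bits.
 * The RF kill interrupt stays enabled so state changes keep reaching the
 * op_mode.
 */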
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;

        was_hw_rfkill = iwl_is_rfkill_set(trans);

        /* tell the device to stop sending interrupts */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        /* device going down, Stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(trans, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clear the interrupt again here.
         */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        /* stop and reset the on-board processor */
        iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);
        clear_bit(STATUS_RFKILL, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);

        /*
         * Check again since the RF kill state may have changed while
         * all the interrupts were disabled, in this case we couldn't
         * receive the RF kill interrupt and update the state in the
         * op_mode.
         * Don't call the op_mode if the rfkill state hasn't changed.
         * This allows the op_mode to call stop_device from the rfkill
         * notification without endless recursion. Under very rare
         * circumstances, we might have a small recursion if the rfkill
         * state changed exactly now while we were called from stop_device.
         * This is very unlikely but can happen and is supported.
         */
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        if (hw_rfkill != was_hw_rfkill)
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
        iwl_disable_interrupts(trans);

        /*
         * in testing mode, the host stays awake and the
         * hardware won't be reset (not even partially)
         */
        if (test)
                return;

        iwl_pcie_disable_ict(trans);

        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /*
         * reset TX queues -- some of their registers reset during S3
         * so if we don't reset everything here the D3 image would try
         * to execute some invalid memory upon resume
         */
        iwl_trans_pcie_tx_reset(trans);

        iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                                    enum iwl_d3_status *status,
                                    bool test)
{
        u32 val;
        int ret;

        if (test) {
                iwl_enable_interrupts(trans);
                *status = IWL_D3_STATUS_ALIVE;
                return 0;
        }

        iwl_pcie_set_pwr(trans, false);

        val = iwl_read32(trans, CSR_RESET);
        if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
                *status = IWL_D3_STATUS_RESET;
                return 0;
        }

        /*
         * Also enables interrupts - none will happen as the device doesn't
         * know we're waking it up, only when the opmode actually tells it
         * after this call.
         */
        iwl_pcie_reset_ict(trans);

        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           25000);
        if (ret < 0) {
                IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
                return ret;
        }

        iwl_trans_pcie_tx_reset(trans);

        ret = iwl_pcie_rx_init(trans);
        if (ret) {
                IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
                return ret;
        }

        *status = IWL_D3_STATUS_ALIVE;
        return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
        bool hw_rfkill;
        int err;

        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
                return err;
        }

        /* Reset the entire device */
        iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

        usleep_range(10, 15);

        iwl_pcie_apm_init(trans);

        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);

        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

        return 0;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        iwl_pcie_apm_stop(trans);

        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        iwl_pcie_disable_ict(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
        writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
        writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
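/*
 * Periphery registers are reached indirectly: the target address (with
 * control bits in the upper byte) is written to HBUS_TARG_PRPH_RADDR/WADDR,
 * and the data is then read from or written to the matching data register.
 */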
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
                               ((reg & 0x000FFFFF) | (3 << 24)));
        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
                                      u32 val)
{
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
                               ((addr & 0x000FFFFF) | (3 << 24)));
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
        trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
        else
                trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
        if (trans_pcie->n_no_reclaim_cmds)
                memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
                       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

        trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
        if (trans_pcie->rx_buf_size_8k)
                trans_pcie->rx_page_order = get_order(8 * 1024);
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);

        trans_pcie->wd_timeout =
                msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        synchronize_irq(trans_pcie->pci_dev->irq);

        iwl_pcie_tx_free(trans);
        iwl_pcie_rx_free(trans);

        free_irq(trans_pcie->pci_dev->irq, trans);
        iwl_pcie_free_ict(trans);

        pci_disable_msi(trans_pcie->pci_dev);
        iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
        kmem_cache_destroy(trans->dev_cmd_pool);

        kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
        if (state)
                set_bit(STATUS_TPOWER_PMI, &trans->status);
        else
                clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
                                           unsigned long *flags)
{
        int ret;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

        if (trans_pcie->cmd_in_flight)
                goto out;

        /* this bit wakes up the NIC */
        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /*
         * These bits say the device is running, and should keep running for
         * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
         * but they do not indicate that embedded SRAM is restored yet;
         * 3945 and 4965 have volatile SRAM, and must save/restore contents
         * to/from host DRAM when sleeping/waking for power-saving.
         * Each direction takes approximately 1/4 millisecond; with this
         * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
         * series of register accesses are expected (e.g. reading Event Log),
         * to keep device from sleeping.
         *
         * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
         * SRAM is okay/restored.  We don't check that here because this call
         * is just for hardware register access; but GP1 MAC_SLEEP check is a
         * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
         *
         * 5000 series and later (including 1000 series) have non-volatile SRAM,
         * and do not save/restore SRAM when power cycling.
         */
        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
                           (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
                            CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
        if (unlikely(ret < 0)) {
                iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
                if (!silent) {
                        u32 val = iwl_read32(trans, CSR_GP_CNTRL);

                        WARN_ONCE(1,
                                  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
                                  val);
                        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
                        return false;
                }
        }

out:
        /*
         * Fool sparse by faking we release the lock - sparse will
         * track nic_access anyway.
         */
        __release(&trans_pcie->reg_lock);
        return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
                                              unsigned long *flags)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->reg_lock);

        /*
         * Fool sparse by faking we acquire the lock - sparse will
         * track nic_access anyway.
         */
        __acquire(&trans_pcie->reg_lock);

        if (trans_pcie->cmd_in_flight)
                goto out;

        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        /*
         * Above we read the CSR_GP_CNTRL register, which will flush
         * any previous writes, but we need the write that clears the
         * MAC_ACCESS_REQ bit to be performed before any other writes
         * scheduled on different CPUs (after we drop reg_lock).
         */
        mmiowb();
out:
        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                   void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        u32 *vals = buf;

        if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
                iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
        return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
                                    const void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        const u32 *vals = buf;

        if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
                                    vals ? vals[offs] : 0);
                iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
        return ret;
}
#define IWL_FLUSH_WAIT_MS	2000
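/*
 * Wait for all TX queues (except the command queue) to drain, giving each
 * queue up to IWL_FLUSH_WAIT_MS. On timeout, dump scheduler and FH state to
 * help diagnose the stuck queue.
 */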
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        struct iwl_queue *q;
        int cnt;
        unsigned long now = jiffies;
        u32 scd_sram_addr;
        u8 buf[16];
        int ret = 0;

        /* waiting for all the tx frames complete might take a while */
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                if (cnt == trans_pcie->cmd_queue)
                        continue;
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                while (q->read_ptr != q->write_ptr && !time_after(jiffies,
                       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
                        msleep(1);

                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans,
                                "fail to flush all tx fifo queues Q %d\n", cnt);
                        ret = -ETIMEDOUT;
                        break;
                }
        }

        if (!ret)
                return 0;

        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);

        scd_sram_addr = trans_pcie->scd_base_addr +
                        SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
        iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

        iwl_print_hex_error(trans, buf, sizeof(buf));

        for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
                IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
                        iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
                u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
                bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
                u32 tbl_dw =
                        iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
                                             SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

                if (cnt & 0x1)
                        tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
                else
                        tbl_dw = tbl_dw & 0x0000FFFF;

                IWL_ERR(trans,
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        cnt, active ? "" : "in", fifo, tbl_dw,
                        iwl_read_prph(trans,
                                      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
        }

        return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
                                         u32 mask, u32 value)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
        switch (cmd) {
        IWL_CMD(CSR_HW_IF_CONFIG_REG);
        IWL_CMD(CSR_INT_COALESCING);
        IWL_CMD(CSR_INT);
        IWL_CMD(CSR_INT_MASK);
        IWL_CMD(CSR_FH_INT_STATUS);
        IWL_CMD(CSR_GPIO_IN);
        IWL_CMD(CSR_RESET);
        IWL_CMD(CSR_GP_CNTRL);
        IWL_CMD(CSR_HW_REV);
        IWL_CMD(CSR_EEPROM_REG);
        IWL_CMD(CSR_EEPROM_GP);
        IWL_CMD(CSR_OTP_GP_REG);
        IWL_CMD(CSR_GIO_REG);
        IWL_CMD(CSR_GP_UCODE_REG);
        IWL_CMD(CSR_GP_DRIVER_REG);
        IWL_CMD(CSR_UCODE_DRV_GP1);
        IWL_CMD(CSR_UCODE_DRV_GP2);
        IWL_CMD(CSR_LED_REG);
        IWL_CMD(CSR_DRAM_INT_TBL_REG);
        IWL_CMD(CSR_GIO_CHICKEN_BITS);
        IWL_CMD(CSR_ANA_PLL_CFG);
        IWL_CMD(CSR_HW_REV_WA_REG);
        IWL_CMD(CSR_DBG_HPET_MEM_REG);
        default:
                return "UNKNOWN";
        }
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
        int i;
        static const u32 csr_tbl[] = {
                CSR_HW_IF_CONFIG_REG,
                CSR_INT_COALESCING,
                CSR_INT,
                CSR_INT_MASK,
                CSR_FH_INT_STATUS,
                CSR_GPIO_IN,
                CSR_RESET,
                CSR_GP_CNTRL,
                CSR_HW_REV,
                CSR_EEPROM_REG,
                CSR_EEPROM_GP,
                CSR_OTP_GP_REG,
                CSR_GIO_REG,
                CSR_GP_UCODE_REG,
                CSR_GP_DRIVER_REG,
                CSR_UCODE_DRV_GP1,
                CSR_UCODE_DRV_GP2,
                CSR_LED_REG,
                CSR_DRAM_INT_TBL_REG,
                CSR_GIO_CHICKEN_BITS,
                CSR_ANA_PLL_CFG,
                CSR_HW_REV_WA_REG,
                CSR_DBG_HPET_MEM_REG
        };

        IWL_ERR(trans, "CSR values:\n");
        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
                "CSR_INT_PERIODIC_REG)\n");
        for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
                IWL_ERR(trans, " %25s: 0X%08x\n",
                        get_csr_string(csr_tbl[i]),
                        iwl_read32(trans, csr_tbl[i]));
        }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        struct iwl_queue *q;
        char *buf;
        int pos = 0;
        int cnt;
        int ret;
        size_t bufsz;

        bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

        if (!trans_pcie->txq)
                return -EAGAIN;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
                                 cnt, q->read_ptr, q->write_ptr,
                                 !!test_bit(cnt, trans_pcie->queue_used),
                                 !!test_bit(cnt, trans_pcie->queue_stopped));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        char buf[256];
        int pos = 0;
        const size_t bufsz = sizeof(buf);

        pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
                         rxq->read);
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
                         rxq->write);
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
                         rxq->free_count);
        if (rxq->rb_stts) {
                pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
                                 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "closed_rb_num: Not Allocated\n");
        }
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        int pos = 0;
        char *buf;
        int bufsz = 24 * 64; /* 24 items * 64 char per item */
        ssize_t ret;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        pos += scnprintf(buf + pos, bufsz - pos,
                         "Interrupt Statistics Report:\n");

        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
                         isr_stats->hw);
        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
                         isr_stats->sw);
        if (isr_stats->sw || isr_stats->hw) {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "\tLast Restarting Code: 0x%X\n",
                                 isr_stats->err_code);
        }
#ifdef CONFIG_IWLWIFI_DEBUG
        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
                         isr_stats->sch);
        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
                         isr_stats->alive);
#endif
        pos += scnprintf(buf + pos, bufsz - pos,
                         "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
                         isr_stats->ctkill);

        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
                         isr_stats->wakeup);

        pos += scnprintf(buf + pos, bufsz - pos,
                         "Rx command responses:\t\t %u\n", isr_stats->rx);

        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
                         isr_stats->tx);

        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
                         isr_stats->unhandled);

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        char buf[8];
        int buf_size;
        u32 reset_flag;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%x", &reset_flag) != 1)
                return -EFAULT;
        if (reset_flag == 0)
                memset(isr_stats, 0, sizeof(*isr_stats));

        return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
                                   const char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char buf[8];
        int buf_size;
        int csr;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%d", &csr) != 1)
                return -EFAULT;

        iwl_pcie_dump_csr(trans);

        return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;

        ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
                kfree(buf);
        }

        return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{
        DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        return 0;

err:
        IWL_ERR(trans, "failed to create the trans debugfs entry\n");
        return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{
        return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
        .start_hw = iwl_trans_pcie_start_hw,
        .op_mode_leave = iwl_trans_pcie_op_mode_leave,
        .fw_alive = iwl_trans_pcie_fw_alive,
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,

        .d3_suspend = iwl_trans_pcie_d3_suspend,
        .d3_resume = iwl_trans_pcie_d3_resume,

        .send_cmd = iwl_trans_pcie_send_hcmd,

        .tx = iwl_trans_pcie_tx,
        .reclaim = iwl_trans_pcie_reclaim,

        .txq_disable = iwl_trans_pcie_txq_disable,
        .txq_enable = iwl_trans_pcie_txq_enable,

        .dbgfs_register = iwl_trans_pcie_dbgfs_register,

        .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
        .read32 = iwl_trans_pcie_read32,
        .read_prph = iwl_trans_pcie_read_prph,
        .write_prph = iwl_trans_pcie_write_prph,
        .read_mem = iwl_trans_pcie_read_mem,
        .write_mem = iwl_trans_pcie_write_mem,
        .configure = iwl_trans_pcie_configure,
        .set_pmi = iwl_trans_pcie_set_pmi,
        .grab_nic_access = iwl_trans_pcie_grab_nic_access,
        .release_nic_access = iwl_trans_pcie_release_nic_access,
        .set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
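/*
 * Allocate and set up the PCIe transport: enable the PCI device, map BAR0,
 * set the DMA masks, enable MSI and hook up the interrupt handler. Returns
 * an ERR_PTR() on failure.
 *
 * Typical use from the driver's PCI probe callback (sketch only, error
 * handling abridged):
 *
 *	trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 */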
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       const struct pci_device_id *ent,
                                       const struct iwl_cfg *cfg)
{
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
        int err;

        trans = kzalloc(sizeof(struct iwl_trans) +
                        sizeof(struct iwl_trans_pcie), GFP_KERNEL);
        if (!trans) {
                err = -ENOMEM;
                goto out;
        }

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans->ops = &trans_ops_pcie;
        trans->cfg = cfg;
        trans_lockdep_init(trans);
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);

        err = pci_enable_device(pdev);
        if (err)
                goto out_no_pci;

        if (!cfg->base_params->pcie_l1_allowed) {
                /*
                 * W/A - seems to solve weird behavior. We need to remove this
                 * if we don't want to stay in L1 all the time. This wastes a
                 * lot of power.
                 */
                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                       PCIE_LINK_STATE_L1 |
                                       PCIE_LINK_STATE_CLKPM);
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (!err)
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!err)
                        err = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }

        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
                err = -ENODEV;
                goto out_pci_release_regions;
        }

        /* We disable the RETRY_TIMEOUT register (0x41) to keep
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
                        pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
                        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
                }
        }

        trans->dev = &pdev->dev;
        trans_pcie->pci_dev = pdev;
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);

        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));

        trans->dev_cmd_headroom = 0;
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
                                  sizeof(struct iwl_device_cmd)
                                  + trans->dev_cmd_headroom,
                                  sizeof(void *),
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
        if (!trans->dev_cmd_pool) {
                err = -ENOMEM;
                goto out_pci_disable_msi;
        }

        trans_pcie->inta_mask = CSR_INI_SET_MASK;

        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;

        err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
        if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }

        return trans;

out_free_ict:
        iwl_pcie_free_ict(trans);
out_free_cmd_pool:
        kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
        pci_disable_msi(pdev);
out_pci_release_regions:
        pci_release_regions(pdev);
out_pci_disable_device:
        pci_disable_device(pdev);
out_no_pci:
        kfree(trans);
out:
        return ERR_PTR(err);
}