/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)
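
/*
 * Both timeouts above are in jiffies: MVM_UCODE_ALIVE_TIMEOUT is HZ,
 * i.e. one second to receive the ALIVE notification, and
 * MVM_UCODE_CALIB_TIMEOUT is 2*HZ, i.e. two seconds for the init-image
 * calibrations to complete.
 */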

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
	}
	kfree(mvm->trans->paging_download_buf);
	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find the paging image start point: if CPU2 exists and is in
	 * paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 *     CPU2 sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
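	/*
	 * Illustrative example (hypothetical section list, not from the
	 * firmware spec): with two CPU1 sections and one non-paged CPU2
	 * section, sec[] could look like
	 *   sec[0]: CPU1 instructions
	 *   sec[1]: CPU1 data
	 *   sec[2]: offset == CPU1_CPU2_SEPARATOR_SECTION
	 *   sec[3]: CPU2 non-paged code/data
	 *   sec[4]: offset == PAGING_SEPARATOR_SECTION
	 *   sec[5]: CPU2 paging CSS
	 *   sec[6]: CPU2 paging image
	 * so the scan below leaves sec_idx pointing at the CSS section.
	 */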
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
		IWL_ERR(mvm, "driver didn't find paging image\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram. The loop index starts from 1
	 * since the CSS block (index 0) was already copied to dram, and
	 * stops at num_of_paging_blk since the last block is not full.
	 */
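	/*
	 * Worked example with hypothetical numbers: if num_of_paging_blk
	 * is 3 with 32KB blocks, iterations idx = 1 and idx = 2 copy the
	 * section bytes [0, 32K) and [32K, 64K) respectively, and the
	 * partially-filled tail is handled by the "last block" copy below.
	 */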
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure PAGING_BLOCK_SIZE is a power of two: 2^BLOCK_2_EXP_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);
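
	/*
	 * Worked example (hypothetical image size): a paging_mem_size of
	 * 560KB yields num_of_pages = 560K / 4K = 140; grouping 8 pages
	 * per 32KB block gives ((140 - 1) / 8) + 1 = 18 blocks, with
	 * 140 - 8 * 17 = 4 pages left for the last, partially-filled block.
	 */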

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * Allocate blocks in dram. Since the CSS is allocated in
	 * fw_paging_db[0], the loop starts from index 1.
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
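
/*
 * Note on the shift in iwl_send_paging_cmd(): device_phy_addr is
 * expressed in pages rather than bytes (assuming PAGE_2_EXP_SIZE is the
 * log2 of FW_PAGING_SIZE, i.e. 12 for 4KB pages), so a block mapped at,
 * say, DMA address 0x12340000 would be reported to the firmware as
 * 0x12340.
 */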

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
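
/*
 * In the no-DMA case served by iwl_trans_get_paging_item(), the
 * transport cannot hand the firmware physical block addresses; instead
 * it copies pages to / from SMEM through paging_download_buf, at the
 * SMEM offset the firmware returned in item_val (saved above as
 * paging_req_addr).
 */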

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}
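
/*
 * Note: iwl_alive_fn() above distinguishes the ALIVE notification
 * versions purely by payload length, so the three response structs must
 * all have distinct sizes; a hypothetical new version with the same
 * size as an existing one would need an explicit version field instead.
 */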

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism. The driver
	 * configures the paging flow only once; the CPU2 paging image
	 * is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When DMA is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location where the pages are stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */
	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
			mvm->queue_to_mac80211[i] = i;
		else
			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
	}

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
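
/*
 * Queue-mapping note for iwl_mvm_load_ucode_wait_alive(): hardware
 * queues below first_agg_queue (except the command queue) map 1:1 onto
 * mac80211 queues; the rest, e.g. aggregation queues, are driver-owned
 * and are therefore marked IWL_INVALID_MAC80211_QUEUE.
 */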

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will
	 * complete the init sequence later, when RF Kill switches off.
	 */
	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send the phy configurations command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};

	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
				struct iwl_mvm_dump_desc *desc,
				struct iwl_fw_dbg_trigger_tlv *trigger)
{
	unsigned int delay = 0;

	if (trigger)
		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));

	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
		return -EBUSY;

	if (WARN_ON(mvm->fw_dump_desc))
		iwl_mvm_free_fw_dump_desc(mvm);

	IWL_WARN(mvm, "Collecting data: trigger %d fired.\n",
		 le32_to_cpu(desc->trig_desc.type));

	mvm->fw_dump_desc = desc;
	mvm->fw_dump_trig = trigger;

	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);

	return 0;
}
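
/*
 * The actual dump runs from fw_dump_wk; stop_delay is taken from the
 * trigger TLV in milliseconds, so e.g. a stop_delay of 2000 defers the
 * collection by msecs_to_jiffies(2000), i.e. two seconds.
 */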

int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
			   const char *str, size_t len,
			   struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_mvm_dump_desc *desc;

	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->len = len;
	desc->trig_desc.type = cpu_to_le32(trig);
	memcpy(desc->trig_desc.data, str, len);

	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
}

int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
				struct iwl_fw_dbg_trigger_tlv *trigger,
				const char *fmt, ...)
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
	char buf[64];

	if (!occurrences)
		return 0;

	if (fmt) {
		va_list ap;

		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
				     trigger);
	if (ret)
		return ret;

	trigger->occurrences = cpu_to_le16(occurrences - 1);
	return 0;
}
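
/*
 * Note on the occurrences counter above: a trigger TLV with, say,
 * occurrences = 3 fires at most three collections; each successful
 * iwl_mvm_fw_dbg_collect() decrements the count, and once it reaches
 * zero the early "if (!occurrences)" check suppresses the trigger.
 */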

static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
{
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
		iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
	else
		iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
}

int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
{
	u8 *ptr;
	int ret;
	int i;

	if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv),
		      "Invalid configuration %d\n", conf_id))
		return -EINVAL;

	/* EARLY START - firmware's configuration is hard coded */
	if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
	     !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
	    conf_id == FW_DBG_START_FROM_ALIVE) {
		iwl_mvm_restart_early_start(mvm);
		return 0;
	}

	if (!mvm->fw->dbg_conf_tlv[conf_id])
		return -EINVAL;

	if (mvm->fw_dbg_conf != FW_DBG_INVALID)
		IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n",
			 mvm->fw_dbg_conf);

	/* Send all HCMDs for configuring the FW debug */
	ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd;
	for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;

		ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0,
					   le16_to_cpu(cmd->len), cmd->data);
		if (ret)
			return ret;

		ptr += sizeof(*cmd);
		ptr += le16_to_cpu(cmd->len);
	}

	mvm->fw_dbg_conf = conf_id;
	return ret;
}
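
/*
 * Layout note for the loop above (inferred from the pointer arithmetic,
 * not from a TLV spec): the debug-configuration TLV carries its host
 * commands as a packed stream of { id, len, data[len] } records back to
 * back, so the cursor advances by sizeof(*cmd) plus each command's own
 * data length.
 */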

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			return ret;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
error:
	iwl_trans_stop_device(mvm->trans);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}
));