/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"
#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)
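
/*
 * The firmware is expected to deliver the ALIVE notification within one
 * second (HZ jiffies) of being started, and the INIT image is expected to
 * finish its internal calibrations within two seconds; these defines are
 * the corresponding wait timeouts used below.
 */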
struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};
static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
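
/*
 * Configure RSS so the firmware spreads received frames over the driver's
 * RX queues: the indirection table below is filled round-robin
 * (i % num_rx_queues), e.g. with 4 RX queues it holds 0, 1, 2, 3, 0, 1, ...
 */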
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
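
/*
 * Release all firmware paging blocks. fw_paging_db[0] (the CSS block)
 * doubles as the "paging memory is allocated" flag, so calling this when
 * nothing was allocated is a no-op, and the final memset makes repeated
 * calls safe.
 */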
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
		mvm->fw_paging_db[i].fw_paging_block = NULL;
	}

	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * find where the paging image starts:
	 * if CPU2 exists and is in paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
	 * non-paged part from the CPU2 paging section
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
		IWL_ERR(mvm, "driver didn't find paging image\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * the loop index starts from 1 since the CSS block has already been
	 * copied to dram and has index 0.
	 * the loop stops at num_of_paging_blk since the last block may not
	 * be full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}
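
/*
 * Worked example of the block accounting below, using the 4KB page and
 * 8-pages-per-block (32KB) layout used here: a paging_mem_size of 340KB
 * gives num_of_pages = 85, num_of_paging_blk = ((85 - 1) / 8) + 1 = 11 and
 * num_of_pages_in_last_blk = 85 - 8 * 10 = 5, i.e. one 4KB CSS block plus
 * eleven 32KB blocks, of which the last has only 5 of its 8 pages in use.
 */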
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure PAGING_BLOCK_SIZE is a power of two: BIT(BLOCK_2_EXP_SIZE) */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * allocate blocks in dram.
	 * since the CSS is allocated in fw_paging_db[0], the loop starts
	 * from index 1
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}
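
/*
 * Note: the three alive layouts above are told apart purely by payload
 * length, so mvm_alive_resp_ver1, mvm_alive_resp_ver2 and mvm_alive_resp
 * must all have distinct sizes for this dispatch to stay unambiguous.
 */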
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

	return false;
}
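
/*
 * The return value feeds the notification-wait framework: returning true
 * (on INIT_COMPLETE_NOTIF) ends the wait in iwl_run_init_mvm_ucode(), while
 * returning false after storing a CALIB_RES_NOTIF_PHY_DB section keeps the
 * wait alive for further calibration results.
 */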
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * the driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty anyway, as we don't use them yet.
	 */
	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
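
/*
 * iwl_mvm_load_ucode_wait_alive() is the common "start an image and wait for
 * ALIVE" helper: iwl_run_init_mvm_ucode() uses it with IWL_UCODE_INIT,
 * iwl_mvm_up() with IWL_UCODE_REGULAR and iwl_mvm_load_d3_fw() with
 * IWL_UCODE_WOWLAN.
 */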
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill switches off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.id = SHARED_MEM_CFG,
		.flags = CMD_WANT_SKB,
	};
	struct iwl_rx_packet *pkt;
	struct iwl_shared_mem_cfg *mem_cfg;
	u32 i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);
	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}