iwlwifi: mvm: LAR: disable LAR support due to NVM vs TLV conflict
drivers/net/wireless/iwlwifi/mvm/nvm.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "iwl-eeprom-parse.h"
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"

/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 0x1b58
#define IWL_MAX_NVM_8000A_SECTION_SIZE 0xffc
#define IWL_MAX_NVM_8000B_SECTION_SIZE 0x1ffc

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	READ_NVM_CHUNK_SUCCEED = 0,
	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

/*
 * prepare the NVM host command w/ the pointers to the nvm buffer
 * and send it to fw
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       u16 offset, u16 length, const u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_WRITE_OPCODE,
	};
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.len = { sizeof(struct iwl_nvm_access_cmd), length },
		.flags = CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, data },
		/* data may come from vmalloc, so use _DUP */
		.dataflags = { 0, IWL_HCMD_DFL_DUP },
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}

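/*
 * Read one chunk of an NVM section. Returns the number of bytes read,
 * 0 for the benign "not valid address" case (a read past the end of a
 * section that ends on a 2K boundary), or a negative errno on failure.
 */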
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from NVM_ACCESS_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver already read
			 * valid data from another chunk, so this case is not
			 * an error.
			 */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
					 offset);
			ret = 0;
		} else {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed with status %d (device: %s)\n",
					 ret, mvm->cfg->name);
			ret = -EIO;
		}
		goto exit;
	}

	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Copy the read data into the caller's buffer */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 const u8 *data, u16 length)
{
	int offset = 0;

	/* copy data in chunks of 2k (and remainder if any) */
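	/* e.g. length = 5000 is written as chunks of 2048, 2048 and 904 */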

	while (offset < length) {
		int chunk_size, ret;

		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,
					  chunk_size, data + offset);
		if (ret < 0)
			return ret;

		offset += chunk_size;
	}

	return 0;
}

/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM. Because the EEPROM reading is unlimited
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as we can,
 * without overflowing, so no check is needed.
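 *
 * For example, a 5000-byte section comes back as chunks of 2048, 2048
 * and 904 bytes; the short (904 < 2048) final read terminates the loop.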
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
				u8 *data, u32 size_read)
{
	u16 length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWL_NVM_DEFAULT_CHUNK_SIZE;

	ret = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (ret == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    mvm->cfg->base_params->eeprom_size) {
			IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
			return -ENOBUFS;
		}

		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		if (ret < 0) {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "Cannot read NVM from section %d offset %d, length %d\n",
					 section, offset, length);
			return ret;
		}
		offset += ret;
	}

	IWL_DEBUG_EEPROM(mvm->trans->dev,
			 "NVM section %d read completed\n", section);
	return offset;
}

static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	const __le16 *hw, *sw, *calib, *regulatory, *mac_override;

	/* Checking for required sections */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else {
		/* SW and REGULATORY sections are mandatory */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
			IWL_ERR(mvm,
				"Can't parse empty family 8000 OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			IWL_ERR(mvm,
				"Can't parse mac_address, empty sections\n");
			return NULL;
		}
	}

	if (WARN_ON(!mvm->cfg))
		return NULL;

	hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
	mac_override =
		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;

	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
				  regulatory, mac_override,
				  mvm->fw->valid_tx_ant,
				  mvm->fw->valid_rx_ant,
				  mvm->fw->ucode_capa.capa[0] &
				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
}

#define MAX_NVM_FILE_LEN 16384

/*
 * Reads external NVM from a file into mvm->nvm_sections
 *
 * HOW TO CREATE THE NVM FILE FORMAT:
 * ------------------------------
 * 1. create hex file, format:
 *      3800 -> header
 *      0000 -> header
 *      5a40 -> data
 *
 *   rev - 6 bit (word1)
 *   len - 10 bit (word1)
 *   id - 4 bit (word2)
 *   rsv - 12 bit (word2)
 *
 * 2. flip 8bits with 8 bits per line to get the right NVM file format
 *
 * 3. create binary file from the hex file
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
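/*
 * Illustrative decode (hypothetical values): for a non-family-8000
 * section header with word1 = 0x0042 and word2 = 0x1000, the parsing
 * loop below computes
 *	section_size = 2 * NVM_WORD1_LEN(0x0042) = 2 * 8 * 0x42 = 1056 bytes
 *	section_id   = NVM_WORD2_ID(0x1000) = 0x1000 >> 12 = 1
 */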
static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof, *temp;
	int max_section_size;
	const __le32 *dword_buff;

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");

	/* Maximal size depends on HW family and step */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
		max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
	else /* Family 8000 B-step or C-step */
		max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware. Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, mvm->nvm_file_name,
			       mvm->trans->dev);
	if (ret) {
		IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
			mvm->nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
		 mvm->nvm_file_name, fw_entry->size);

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(mvm, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;
	dword_buff = (__le32 *)fw_entry->data;

	/*
	 * Some NVM files contain a header, identified by two dwords:
	 * dword[0] = 0x2A504C54
	 * dword[1] = 0x4E564D2A
	 *
	 * This header must be skipped when providing the NVM data to the FW.
	 */
	if (fw_entry->size > NVM_HEADER_SIZE &&
	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
		IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
		IWL_INFO(mvm, "NVM Manufacturing date %08X\n",
			 le32_to_cpu(dword_buff[3]));
	} else {
		file_sec = (void *)fw_entry->data;
	}

	while (true) {
		if (file_sec->data > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
						le16_to_cpu(file_sec->word2));
			section_id = NVM_WORD1_ID_FAMILY_8000(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > max_section_size) {
			IWL_ERR(mvm, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(mvm, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		if (file_sec->data + section_size > eof) {
			IWL_ERR(mvm,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}
		mvm->nvm_sections[section_id].data = temp;
		mvm->nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}

/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
	int i, ret = 0;
	struct iwl_nvm_section *sections = mvm->nvm_sections;

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");

	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
			continue;
		ret = iwl_nvm_write_section(mvm, i, sections[i].data,
					    sections[i].length);
		if (ret < 0) {
			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
			break;
		}
	}
	return ret;
}

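/*
 * Top-level NVM init: optionally read every section from the NIC,
 * optionally override/extend the result with an external NVM file,
 * then parse the sections into mvm->nvm_data.
 */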
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
	int ret, section;
	u32 size_read = 0;
	u8 *nvm_buffer, *temp;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
		return -EINVAL;

	/* load NVM values from nic */
	if (read_nvm_from_nic) {
		/* Read From FW NVM */
		IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

		nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
				     GFP_KERNEL);
		if (!nvm_buffer)
			return -ENOMEM;
		for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
			/* we override the constness for initial read */
			ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
						   size_read);
			if (ret < 0)
				continue;
			size_read += ret;
			temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
			if (!temp) {
				ret = -ENOMEM;
				break;
			}
			mvm->nvm_sections[section].data = temp;
			mvm->nvm_sections[section].length = ret;

#ifdef CONFIG_IWLWIFI_DEBUGFS
			switch (section) {
			case NVM_SECTION_TYPE_SW:
				mvm->nvm_sw_blob.data = temp;
				mvm->nvm_sw_blob.size = ret;
				break;
			case NVM_SECTION_TYPE_CALIBRATION:
				mvm->nvm_calib_blob.data = temp;
				mvm->nvm_calib_blob.size = ret;
				break;
			case NVM_SECTION_TYPE_PRODUCTION:
				mvm->nvm_prod_blob.data = temp;
				mvm->nvm_prod_blob.size = ret;
				break;
			default:
				if (section == mvm->cfg->nvm_hw_section_num) {
					mvm->nvm_hw_blob.data = temp;
					mvm->nvm_hw_blob.size = ret;
					break;
				}
			}
#endif
		}
		if (!size_read)
			IWL_ERR(mvm, "OTP is blank\n");
		kfree(nvm_buffer);
	}

	/* load external NVM if configured */
	if (mvm->nvm_file_name) {
		/* move to External NVM flow */
		ret = iwl_mvm_read_external_nvm(mvm);
		if (ret)
			return ret;
	}

	/* parse the relevant nvm sections */
	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
	if (!mvm->nvm_data)
		return -ENODATA;
	IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
			 mvm->nvm_data->nvm_version);

	return 0;
}

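/*
 * Send an MCC update to the firmware and return a copy of its response.
 * The two-character alpha2 country code is packed big-endian into a
 * 16-bit MCC: e.g. "US" -> 0x5553, and the world domain "00" -> 0x3030.
 */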
struct iwl_mcc_update_resp *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2)
{
	struct iwl_mcc_update_cmd mcc_update_cmd = {
		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
	};
	struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = MCC_UPDATE_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &mcc_update_cmd },
	};

	int ret;
	u32 status;
	int resp_len, n_channels;
	u16 mcc;

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c'\n",
		      alpha2[0], alpha2[1]);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ERR_PTR(ret);

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
			pkt->hdr.flags);
		ret = -EIO;
		goto exit;
	}

	/* Extract MCC response */
	mcc_resp = (void *)pkt->data;
	status = le32_to_cpu(mcc_resp->status);

	if (status == MCC_RESP_INVALID) {
		IWL_ERR(mvm,
			"FW ERROR: MCC update with invalid parameter '%c%c'\n",
			alpha2[0], alpha2[1]);
		ret = -EINVAL;
		goto exit;
	} else if (status == MCC_RESP_NVM_DISABLED) {
		ret = 0;
		/* resp_cp will be NULL */
		goto exit;
	}

	mcc = le16_to_cpu(mcc_resp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0) {
		mcc = 0x3030; /* "00" - world */
		mcc_resp->mcc = cpu_to_le16(mcc);
	}

	n_channels = __le32_to_cpu(mcc_resp->n_channels);
	IWL_DEBUG_LAR(mvm,
		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
		      status, mcc, mcc >> 8, mcc & 0xff,
		      !!(status == MCC_RESP_SAME_CHAN_PROFILE), n_channels);

	resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
	resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
	if (!resp_cp) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = 0;
exit:
	iwl_free_resp(&cmd);
	if (ret)
		return ERR_PTR(ret);
	return resp_cp;
}

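/*
 * Initialize LAR (location-aware regulatory) handling: warn if the
 * firmware TLV and the NVM disagree about LAR on family 8000, replay
 * the last MCC to the firmware after a HW restart, and otherwise leave
 * the LAR regdomain unset until the initial driver hint is sent.
 */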
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
	bool tlv_lar;
	bool nvm_lar;

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		tlv_lar = mvm->fw->ucode_capa.capa[0] &
			IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
		nvm_lar = mvm->nvm_data->lar_enabled;
		if (tlv_lar != nvm_lar)
			IWL_INFO(mvm,
				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s, NVM = %s)\n",
				 tlv_lar ? "enabled" : "disabled",
				 nvm_lar ? "enabled" : "disabled");
	}

	if (!iwl_mvm_is_lar_supported(mvm))
		return 0;

	/*
	 * During HW restart, only replay the last set MCC to FW. Otherwise,
	 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/* This should only be called during vif up and hold RTNL */
		const struct ieee80211_regdomain *r =
			rtnl_dereference(mvm->hw->wiphy->regd);

		if (r) {
			struct iwl_mcc_update_resp *resp;

			resp = iwl_mvm_update_mcc(mvm, r->alpha2);
			if (IS_ERR_OR_NULL(resp))
				return -EIO;

			kfree(resp);
		}

		return 0;
	}

	/*
	 * Driver regulatory hint for initial update - use the special
	 * unknown-country "99" code. This will also clear the "custom reg"
	 * flag and allow regdomain changes. It will happen after init since
	 * RTNL is required.
	 * Disallow scans that might crash the FW while the LAR regdomain
	 * is not set.
	 */
	mvm->lar_regdom_set = false;
	return 0;
}

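/*
 * Handle the MCC update notification from the firmware: extract the
 * two-character country code it reports and log it.
 */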
int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
			       struct iwl_rx_cmd_buffer *rxb,
			       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
	char mcc[3];

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return -EOPNOTSUPP;

	mcc[0] = notif->mcc >> 8;
	mcc[1] = notif->mcc & 0xff;
	mcc[2] = '\0';

	IWL_DEBUG_LAR(mvm,
		      "RX: received chub update mcc command (mcc 0x%x '%s')\n",
		      notif->mcc, mcc);
	return 0;
}