iwlwifi: define PAN queues/FIFOs
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
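
/*
 * Default Tx queue to FIFO mapping (BSS-only operation): queues 0-3
 * feed the EDCA access-category FIFOs, queue 4 feeds the command FIFO,
 * and the remaining queues are unused.
 */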
static const s8 iwlagn_default_queue_to_tx_fifo[] = {
        IWL_TX_FIFO_VO,
        IWL_TX_FIFO_VI,
        IWL_TX_FIFO_BE,
        IWL_TX_FIFO_BK,
        IWLAGN_CMD_FIFO_NUM,
        IWL_TX_FIFO_UNUSED,
        IWL_TX_FIFO_UNUSED,
        IWL_TX_FIFO_UNUSED,
        IWL_TX_FIFO_UNUSED,
        IWL_TX_FIFO_UNUSED,
};
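
/*
 * Queue to FIFO mapping used when the PAN (IPAN) context is enabled:
 * queues 0-3 keep the BSS access-category FIFOs, queues 4-8 feed the
 * PAN FIFOs (queue 8 reuses the PAN BE FIFO) and queue 9 feeds the
 * command FIFO.
 */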
static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
        IWL_TX_FIFO_VO,
        IWL_TX_FIFO_VI,
        IWL_TX_FIFO_BE,
        IWL_TX_FIFO_BK,
        IWL_TX_FIFO_BK_IPAN,
        IWL_TX_FIFO_BE_IPAN,
        IWL_TX_FIFO_VI_IPAN,
        IWL_TX_FIFO_VO_IPAN,
        IWL_TX_FIFO_BE_IPAN,
        IWLAGN_CMD_FIFO_NUM,
};
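
/*
 * Per-event WiMAX coexistence priority table; copied verbatim into
 * COEX_PRIORITY_TABLE_CMD when the device supports WiMAX coexistence.
 */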
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
        {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
         0, COEX_UNASSOC_IDLE_FLAGS},
        {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
         0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
        {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
         0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
        {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
         0, COEX_CALIBRATION_FLAGS},
        {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
         0, COEX_PERIODIC_CALIBRATION_FLAGS},
        {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
         0, COEX_CONNECTION_ESTAB_FLAGS},
        {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
         0, COEX_ASSOCIATED_IDLE_FLAGS},
        {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
         0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
        {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
         0, COEX_ASSOC_AUTO_SCAN_FLAGS},
        {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
         0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
        {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
        {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
        {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
         0, COEX_STAND_ALONE_DEBUG_FLAGS},
        {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
         0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
        {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
        {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};

/*
 * ucode
 */
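/*
 * Copy one uCode section from host DRAM into device SRAM at dst_addr
 * using the FH service-channel DMA, then wait (up to five seconds) for
 * the "firmware load complete" interrupt from the device.
 */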
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
                               struct fw_desc *image, u32 dst_addr)
{
        dma_addr_t phy_addr = image->p_addr;
        u32 byte_cnt = image->len;
        int ret;

        priv->ucode_write_complete = 0;

        iwl_write_direct32(priv,
                FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write_direct32(priv,
                FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

        iwl_write_direct32(priv,
                FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write_direct32(priv,
                FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write_direct32(priv,
                FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
                1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
                FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write_direct32(priv,
                FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
        ret = wait_event_interruptible_timeout(priv->wait_command_queue,
                                        priv->ucode_write_complete, 5 * HZ);
        if (ret == -ERESTARTSYS) {
                IWL_ERR(priv, "Could not load the %s uCode section due "
                        "to interrupt\n", name);
                return ret;
        }
        if (!ret) {
                IWL_ERR(priv, "Could not load the %s uCode section\n",
                        name);
                return -ETIMEDOUT;
        }

        return 0;
}
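
/*
 * Load one uCode image: its instruction section into instruction SRAM
 * and its data section into data SRAM.
 */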
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
                                   struct fw_desc *inst_image,
                                   struct fw_desc *data_image)
{
        int ret = 0;

        ret = iwlagn_load_section(priv, "INST", inst_image,
                                  IWLAGN_RTC_INST_LOWER_BOUND);
        if (ret)
                return ret;

        return iwlagn_load_section(priv, "DATA", data_image,
                                   IWLAGN_RTC_DATA_LOWER_BOUND);
}
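
/*
 * Load the init uCode if it exists and has not been run yet, otherwise
 * load the runtime uCode.
 */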
int iwlagn_load_ucode(struct iwl_priv *priv)
{
        int ret = 0;

        /* check whether init ucode should be loaded, or rather runtime ucode */
        if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
                IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
                ret = iwlagn_load_given_ucode(priv,
                        &priv->ucode_init, &priv->ucode_init_data);
                if (!ret) {
                        IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
                        priv->ucode_type = UCODE_INIT;
                }
        } else {
                IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
                        "Loading runtime ucode...\n");
                ret = iwlagn_load_given_ucode(priv,
                        &priv->ucode_code, &priv->ucode_data);
                if (!ret) {
                        IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
                        priv->ucode_type = UCODE_RT;
                }
        }

        return ret;
}

/*
 * Calibration
 */
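/*
 * Read the crystal frequency calibration values from EEPROM and store
 * them as a calibration result for the runtime uCode.
 */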
static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
{
        struct iwl_calib_xtal_freq_cmd cmd;
        __le16 *xtal_calib =
                (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);

        cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
        cmd.hdr.first_group = 0;
        cmd.hdr.groups_num = 1;
        cmd.hdr.data_valid = 1;
        cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
        cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
        return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
                             (u8 *)&cmd, sizeof(cmd));
}
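
/* Ask the uCode to run all init-time calibrations and report their results. */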
static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
{
        struct iwl_calib_cfg_cmd calib_cfg_cmd;
        struct iwl_host_cmd cmd = {
                .id = CALIBRATION_CFG_CMD,
                .len = sizeof(struct iwl_calib_cfg_cmd),
                .data = &calib_cfg_cmd,
        };

        memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
        calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
        calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
        calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
        calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;

        return iwl_send_cmd(priv, &cmd);
}
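
/*
 * Handle a calibration result notification from the init uCode: store
 * the result so that iwl_send_calib_results() can later replay it to
 * the runtime uCode.
 */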
void iwlagn_rx_calib_result(struct iwl_priv *priv,
                            struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
        int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
        int index;

        /* the reported length includes the length field itself; subtract it */
        len -= 4;

        /* Define the order in which the results will be sent to the runtime
         * uCode. iwl_send_calib_results sends them in order of their index.
         * We sort them here.
         */
        switch (hdr->op_code) {
        case IWL_PHY_CALIBRATE_DC_CMD:
                index = IWL_CALIB_DC;
                break;
        case IWL_PHY_CALIBRATE_LO_CMD:
                index = IWL_CALIB_LO;
                break;
        case IWL_PHY_CALIBRATE_TX_IQ_CMD:
                index = IWL_CALIB_TX_IQ;
                break;
        case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
                index = IWL_CALIB_TX_IQ_PERD;
                break;
        case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
                index = IWL_CALIB_BASE_BAND;
                break;
        default:
                IWL_ERR(priv, "Unknown calibration notification %d\n",
                        hdr->op_code);
                return;
        }
        iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}

void iwlagn_rx_calib_complete(struct iwl_priv *priv,
                              struct iwl_rx_mem_buffer *rxb)
{
        IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
        queue_work(priv->workqueue, &priv->restart);
}

void iwlagn_init_alive_start(struct iwl_priv *priv)
{
        int ret = 0;

        /* Check alive response for "valid" sign from uCode */
        if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
                /* We had an error bringing up the hardware, so take it
                 * all the way back down so we can try again */
                IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
                goto restart;
        }

        /* initialize uCode was loaded... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "initialize" alive if code weren't properly loaded. */
        if (iwl_verify_ucode(priv)) {
                /* "initialize" instruction load was bad;
                 * take it all the way back down so we can try again */
                IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
                goto restart;
        }

        ret = priv->cfg->ops->lib->alive_notify(priv);
        if (ret) {
                IWL_WARN(priv,
                        "Could not complete ALIVE transition: %d\n", ret);
                goto restart;
        }

        iwlagn_send_calib_cfg(priv);
        return;

restart:
        /* real restart (first load init_ucode) */
        queue_work(priv->workqueue, &priv->restart);
}
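
/*
 * Send the WiMAX coexistence priority configuration. Devices that do
 * not support WiMAX coexistence get an all-zero (disabled) command.
 */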
static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
{
        struct iwl_wimax_coex_cmd coex_cmd;

        if (priv->cfg->support_wimax_coexist) {
                /* UnMask wake up src at associated sleep */
                coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;

                /* UnMask wake up src at unassociated sleep */
                coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
                memcpy(coex_cmd.sta_prio, cu_priorities,
                        sizeof(struct iwl_wimax_coex_event_entry) *
                        COEX_NUM_OF_EVENTS);

                /* enabling the coexistence feature */
                coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;

                /* enabling the priorities tables */
                coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
        } else {
                /* coexistence is disabled */
                memset(&coex_cmd, 0, sizeof(coex_cmd));
        }
        return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
                                sizeof(coex_cmd), &coex_cmd);
}
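
/*
 * BT coexistence priority table, indexed by BT_COEX_PRIO_TBL_EVT_*.
 * Each entry packs a priority level with a "shared antenna" flag.
 */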
static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
        ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        0, 0, 0, 0, 0, 0, 0
};
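
/* Send the BT priority table above to the uCode. */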
static void iwlagn_send_prio_tbl(struct iwl_priv *priv)
{
        struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;

        memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
                sizeof(iwlagn_bt_prio_tbl));
        if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
                                sizeof(prio_tbl_cmd), &prio_tbl_cmd))
                IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
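
/* Open or close a BT protection environment for the given event type. */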
static void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
        struct iwl_bt_coex_prot_env_cmd env_cmd;

        env_cmd.action = action;
        env_cmd.type = type;
        if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
                             sizeof(env_cmd), &env_cmd))
                IWL_ERR(priv, "failed to send BT env command\n");
}
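
/*
 * Called once the uCode has reported ALIVE: reset the Tx scheduler (SCD)
 * context area, point the scheduler at the byte-count tables, enable the
 * Tx DMA channels, map Tx queues to FIFOs and send the coexistence and
 * calibration configuration.
 */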
int iwlagn_alive_notify(struct iwl_priv *priv)
{
        const s8 *queues;
        u32 a;
        unsigned long flags;
        int i, chan;
        u32 reg_val;

        spin_lock_irqsave(&priv->lock, flags);

        priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
        a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
        for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
                a += 4)
                iwl_write_targ_mem(priv, a, 0);
        for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
                a += 4)
                iwl_write_targ_mem(priv, a, 0);
        for (; a < priv->scd_base_addr +
                IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
                iwl_write_targ_mem(priv, a, 0);

        iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
                priv->scd_bc_tbls.dma >> 10);

        /* Enable DMA channel */
        for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                        FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                        FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

        /* Update FH chicken bits */
        reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
        iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
                IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
        iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

        /* initiate the queues */
        for (i = 0; i < priv->hw_params.max_txq_num; i++) {
                iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
                                sizeof(u32),
                                ((SCD_WIN_SIZE <<
                                IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                                ((SCD_FRAME_LIMIT <<
                                IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                                IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
        }

        iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
                IWL_MASK(0, priv->hw_params.max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
        priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
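
        /*
         * If any context besides the BSS context is valid, the PAN (IPAN)
         * uCode is in use: queues 4-8 then feed the PAN FIFOs and queue 9
         * carries host commands (see iwlagn_ipan_queue_to_tx_fifo above).
         */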
        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
                queues = iwlagn_ipan_queue_to_tx_fifo;
        else
                queues = iwlagn_default_queue_to_tx_fifo;

        iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

        /* make sure all queues are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
        for (i = 0; i < 4; i++)
                atomic_set(&priv->queue_stop_count[i], 0);

        /* reset to 0 to enable all queues first */
        priv->txq_ctx_active_msk = 0;

        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

        for (i = 0; i < 10; i++) {
                int ac = queues[i];

                iwl_txq_ctx_activate(priv, i);

                if (ac == IWL_TX_FIFO_UNUSED)
                        continue;

                iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (priv->cfg->advanced_bt_coexist) {
                /* Configure Bluetooth device coexistence support */
                /* need to perform this before any calibration */
                priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
                priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
                priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
                priv->cfg->ops->hcmd->send_bt_config(priv);
                priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;

                if (bt_coex_active && priv->iw_mode != NL80211_IFTYPE_ADHOC) {
                        iwlagn_send_prio_tbl(priv);
                        iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
                                           BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                        iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
                                           BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                }

        }

        iwlagn_send_wimax_coex(priv);

        iwlagn_set_Xtal_calib(priv);
        iwl_send_calib_results(priv);

        return 0;
}


/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
        u32 val;
        int ret = 0;
        u32 errcnt = 0;
        u32 i;

        IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

        for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
                iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                        i + IWLAGN_RTC_INST_LOWER_BOUND);
                val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
                        ret = -EIO;
                        errcnt++;
                        if (errcnt >= 3)
                                break;
                }
        }

        return ret;
}

/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
                                u32 len)
{
        u32 val;
        u32 save_len = len;
        int ret = 0;
        u32 errcnt;

        IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

        iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
                           IWLAGN_RTC_INST_LOWER_BOUND);

        errcnt = 0;
        for (; len > 0; len -= sizeof(u32), image++) {
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
                val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
                        IWL_ERR(priv, "uCode INST section is invalid at "
                                "offset 0x%x, is 0x%x, s/b 0x%x\n",
                                save_len - len, val, le32_to_cpu(*image));
                        ret = -EIO;
                        errcnt++;
                        if (errcnt >= 20)
                                break;
                }
        }

        if (!errcnt)
                IWL_DEBUG_INFO(priv,
                        "ucode image in INSTRUCTION memory is good\n");

        return ret;
}

/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 */
int iwl_verify_ucode(struct iwl_priv *priv)
{
        __le32 *image;
        u32 len;
        int ret;

        /* Try bootstrap */
        image = (__le32 *)priv->ucode_boot.v_addr;
        len = priv->ucode_boot.len;
        ret = iwlcore_verify_inst_sparse(priv, image, len);
        if (!ret) {
                IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
                return 0;
        }

        /* Try initialize */
        image = (__le32 *)priv->ucode_init.v_addr;
        len = priv->ucode_init.len;
        ret = iwlcore_verify_inst_sparse(priv, image, len);
        if (!ret) {
                IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
                return 0;
        }

        /* Try runtime/protocol */
        image = (__le32 *)priv->ucode_code.v_addr;
        len = priv->ucode_code.len;
        ret = iwlcore_verify_inst_sparse(priv, image, len);
        if (!ret) {
                IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
                return 0;
        }

        IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

        /* Since nothing seems to match, show first several data entries in
         * instruction SRAM, so maybe visual inspection will give a clue.
         * Selection of bootstrap image (vs. other images) is arbitrary. */
        image = (__le32 *)priv->ucode_boot.v_addr;
        len = priv->ucode_boot.len;
        ret = iwl_verify_inst_full(priv, image, len);

        return ret;
}