1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/slab.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/delay.h>
35 #include <linux/sched.h>
36 #include <linux/skbuff.h>
37 #include <linux/netdevice.h>
38 #include <linux/firmware.h>
39 #include <linux/etherdevice.h>
40 #include <linux/if_arp.h>
41
42 #include <net/mac80211.h>
43
44 #include <asm/div64.h>
45
46 #include "iwl-eeprom.h"
47 #include "iwl-dev.h"
48 #include "iwl-core.h"
49 #include "iwl-io.h"
50 #include "iwl-helpers.h"
51 #include "iwl-sta.h"
52 #include "iwl-agn-calib.h"
53 #include "iwl-agn.h"
54 #include "iwl-shared.h"
55 #include "iwl-bus.h"
56 #include "iwl-trans.h"
57
58 /******************************************************************************
59 *
60 * module boilerplate
61 *
62 ******************************************************************************/
63
64 /*
65 * module name, copyright, version, etc.
66 */
67 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
68
69 #ifdef CONFIG_IWLWIFI_DEBUG
70 #define VD "d"
71 #else
72 #define VD
73 #endif
74
75 #define DRV_VERSION IWLWIFI_VERSION VD
76
77
78 MODULE_DESCRIPTION(DRV_DESCRIPTION);
79 MODULE_VERSION(DRV_VERSION);
80 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
81 MODULE_LICENSE("GPL");
82
83 void iwl_update_chain_flags(struct iwl_priv *priv)
84 {
85 struct iwl_rxon_context *ctx;
86
87 for_each_context(priv, ctx) {
88 iwlagn_set_rxon_chain(priv, ctx);
89 if (ctx->active.rx_chain != ctx->staging.rx_chain)
90 iwlagn_commit_rxon(priv, ctx);
91 }
92 }
93
94 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
95 static void iwl_set_beacon_tim(struct iwl_priv *priv,
96 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
97 u8 *beacon, u32 frame_size)
98 {
99 u16 tim_idx;
100 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
101
102 /*
103 * The index is relative to frame start but we start looking at the
104 * variable-length part of the beacon.
105 */
106 tim_idx = mgmt->u.beacon.variable - beacon;
107
108 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
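/* Each element is encoded as [id][length][payload], so beacon[tim_idx + 1]
 * is the element length and stepping length + 2 bytes reaches the next one. */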
109 while ((tim_idx < (frame_size - 2)) &&
110 (beacon[tim_idx] != WLAN_EID_TIM))
111 tim_idx += beacon[tim_idx+1] + 2;
112
113 /* If TIM field was found, set variables */
114 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
115 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
116 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
117 } else
118 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
119 }
120
121 int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
122 {
123 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
124 struct iwl_host_cmd cmd = {
125 .id = REPLY_TX_BEACON,
126 .flags = CMD_SYNC,
127 };
128 struct ieee80211_tx_info *info;
129 u32 frame_size;
130 u32 rate_flags;
131 u32 rate;
132
133 /*
134 * We have to set up the TX command, the TX Beacon command, and the
135 * beacon contents.
136 */
137
138 lockdep_assert_held(&priv->mutex);
139
140 if (!priv->beacon_ctx) {
141 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
142 return 0;
143 }
144
145 if (WARN_ON(!priv->beacon_skb))
146 return -EINVAL;
147
148 /* Allocate beacon command */
149 if (!priv->beacon_cmd)
150 priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
151 tx_beacon_cmd = priv->beacon_cmd;
152 if (!tx_beacon_cmd)
153 return -ENOMEM;
154
155 frame_size = priv->beacon_skb->len;
156
157 /* Set up TX command fields */
158 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
159 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
160 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
161 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
162 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
163
164 /* Set up TX beacon command fields */
165 iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
166 frame_size);
167
168 /* Set up packet rate and flags */
169 info = IEEE80211_SKB_CB(priv->beacon_skb);
170
171 /*
172 * Let's set up the rate at least somewhat correctly;
173 * it will currently not actually be used by the uCode,
174 * which uses the broadcast station's rate instead.
175 */
176 if (info->control.rates[0].idx < 0 ||
177 info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
178 rate = 0;
179 else
180 rate = info->control.rates[0].idx;
181
182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
183 priv->hw_params.valid_tx_ant);
184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
185
186 /* In mac80211, rates for 5 GHz start at 0 */
187 if (info->band == IEEE80211_BAND_5GHZ)
188 rate += IWL_FIRST_OFDM_RATE;
189 else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
190 rate_flags |= RATE_MCS_CCK_MSK;
191
192 tx_beacon_cmd->tx.rate_n_flags =
193 iwl_hw_set_rate_n_flags(rate, rate_flags);
194
195 /* Submit command */
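/* The beacon goes out as two chunks, the command structure and the raw
 * beacon frame; NOCOPY makes the transport reference both buffers directly
 * instead of copying them into the command. */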
196 cmd.len[0] = sizeof(*tx_beacon_cmd);
197 cmd.data[0] = tx_beacon_cmd;
198 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
199 cmd.len[1] = frame_size;
200 cmd.data[1] = priv->beacon_skb->data;
201 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
202
203 return trans_send_cmd(&priv->trans, &cmd);
204 }
205
206 static void iwl_bg_beacon_update(struct work_struct *work)
207 {
208 struct iwl_priv *priv =
209 container_of(work, struct iwl_priv, beacon_update);
210 struct sk_buff *beacon;
211
212 mutex_lock(&priv->mutex);
213 if (!priv->beacon_ctx) {
214 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
215 goto out;
216 }
217
218 if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
219 /*
220 * The uCode will send beacon notifications even in
221 * IBSS mode, but we don't want to process them. The
222 * type check is deferred to here because accessing
223 * beacon_ctx requires holding the mutex.
224 */
225 goto out;
226 }
227
228 /* Pull updated AP beacon from mac80211. Will fail if not in AP mode. */
229 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
230 if (!beacon) {
231 IWL_ERR(priv, "update beacon failed -- keeping old\n");
232 goto out;
233 }
234
235 /* A new beacon skb is allocated every time; dispose of the previous one. */
236 dev_kfree_skb(priv->beacon_skb);
237
238 priv->beacon_skb = beacon;
239
240 iwlagn_send_beacon_cmd(priv);
241 out:
242 mutex_unlock(&priv->mutex);
243 }
244
245 static void iwl_bg_bt_runtime_config(struct work_struct *work)
246 {
247 struct iwl_priv *priv =
248 container_of(work, struct iwl_priv, bt_runtime_config);
249
250 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
251 return;
252
253 /* don't send host command if rf-kill is on */
254 if (!iwl_is_ready_rf(priv))
255 return;
256 iwlagn_send_advance_bt_config(priv);
257 }
258
259 static void iwl_bg_bt_full_concurrency(struct work_struct *work)
260 {
261 struct iwl_priv *priv =
262 container_of(work, struct iwl_priv, bt_full_concurrency);
263 struct iwl_rxon_context *ctx;
264
265 mutex_lock(&priv->mutex);
266
267 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
268 goto out;
269
270 /* don't send host command if rf-kill is on */
271 if (!iwl_is_ready_rf(priv))
272 goto out;
273
274 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
275 priv->bt_full_concurrent ?
276 "full concurrency" : "3-wire");
277
278 /*
279 * LQ & RXON update cmds must be sent before the BT Config cmd
280 * to avoid 3-wire collisions
281 */
282 for_each_context(priv, ctx) {
283 iwlagn_set_rxon_chain(priv, ctx);
284 iwlagn_commit_rxon(priv, ctx);
285 }
286
287 iwlagn_send_advance_bt_config(priv);
288 out:
289 mutex_unlock(&priv->mutex);
290 }
291
292 /**
293 * iwl_bg_statistics_periodic - Timer callback to queue statistics
294 *
295 * This callback is provided in order to send a statistics request.
296 *
297 * This timer function is continually reset to execute within
298 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
299 * was received. We need to ensure we receive the statistics in order
300 * to update the temperature used for calibrating the TXPOWER.
301 */
302 static void iwl_bg_statistics_periodic(unsigned long data)
303 {
304 struct iwl_priv *priv = (struct iwl_priv *)data;
305
306 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
307 return;
308
309 /* don't send host command if rf-kill is on */
310 if (!iwl_is_ready_rf(priv))
311 return;
312
313 iwl_send_statistics_request(priv, CMD_ASYNC, false);
314 }
315
316
317 static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
318 u32 start_idx, u32 num_events,
319 u32 mode)
320 {
321 u32 i;
322 u32 ptr; /* SRAM byte address of log data */
323 u32 ev, time, data; /* event log data */
324 unsigned long reg_flags;
325
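/* The log header occupies 4 u32s; each entry is 2 u32s in mode 0
 * (event id, data) or 3 u32s in mode 1 (event id, timestamp, data). */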
326 if (mode == 0)
327 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
328 else
329 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
330
331 /* Make sure device is powered up for SRAM reads */
332 spin_lock_irqsave(&priv->reg_lock, reg_flags);
333 if (iwl_grab_nic_access(priv)) {
334 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
335 return;
336 }
337
338 /* Set starting address; reads will auto-increment */
339 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
340 rmb();
341
342 /*
343 * "time" is actually "data" for mode 0 (no timestamp).
344 * place event id # at far right for easier visual parsing.
345 */
346 for (i = 0; i < num_events; i++) {
347 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
348 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
349 if (mode == 0) {
350 trace_iwlwifi_dev_ucode_cont_event(priv,
351 0, time, ev);
352 } else {
353 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
354 trace_iwlwifi_dev_ucode_cont_event(priv,
355 time, data, ev);
356 }
357 }
358 /* Allow device to power down */
359 iwl_release_nic_access(priv);
360 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
361 }
362
363 static void iwl_continuous_event_trace(struct iwl_priv *priv)
364 {
365 u32 capacity; /* event log capacity in # entries */
366 u32 base; /* SRAM byte address of event log header */
367 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
368 u32 num_wraps; /* # times uCode wrapped to top of log */
369 u32 next_entry; /* index of next entry to be written by uCode */
370
371 base = priv->device_pointers.error_event_table;
372 if (iwlagn_hw_valid_rtc_data_addr(base)) {
373 capacity = iwl_read_targ_mem(priv, base);
374 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
375 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
376 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
377 } else
378 return;
379
380 if (num_wraps == priv->event_log.num_wraps) {
381 iwl_print_cont_event_trace(priv,
382 base, priv->event_log.next_entry,
383 next_entry - priv->event_log.next_entry,
384 mode);
385 priv->event_log.non_wraps_count++;
386 } else {
387 if ((num_wraps - priv->event_log.num_wraps) > 1)
388 priv->event_log.wraps_more_count++;
389 else
390 priv->event_log.wraps_once_count++;
391 trace_iwlwifi_dev_ucode_wrap_event(priv,
392 num_wraps - priv->event_log.num_wraps,
393 next_entry, priv->event_log.next_entry);
394 if (next_entry < priv->event_log.next_entry) {
395 iwl_print_cont_event_trace(priv, base,
396 priv->event_log.next_entry,
397 capacity - priv->event_log.next_entry,
398 mode);
399
400 iwl_print_cont_event_trace(priv, base, 0,
401 next_entry, mode);
402 } else {
403 iwl_print_cont_event_trace(priv, base,
404 next_entry, capacity - next_entry,
405 mode);
406
407 iwl_print_cont_event_trace(priv, base, 0,
408 next_entry, mode);
409 }
410 }
411 priv->event_log.num_wraps = num_wraps;
412 priv->event_log.next_entry = next_entry;
413 }
414
415 /**
416 * iwl_bg_ucode_trace - Timer callback to log ucode event
417 *
418 * The timer is continually set to execute every
419 * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
420 * This function performs the continuous uCode event logging
421 * operation, if enabled.
422 */
423 static void iwl_bg_ucode_trace(unsigned long data)
424 {
425 struct iwl_priv *priv = (struct iwl_priv *)data;
426
427 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
428 return;
429
430 if (priv->event_log.ucode_trace) {
431 iwl_continuous_event_trace(priv);
432 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
433 mod_timer(&priv->ucode_trace,
434 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
435 }
436 }
437
438 static void iwl_bg_tx_flush(struct work_struct *work)
439 {
440 struct iwl_priv *priv =
441 container_of(work, struct iwl_priv, tx_flush);
442
443 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
444 return;
445
446 /* do nothing if rf-kill is on */
447 if (!iwl_is_ready_rf(priv))
448 return;
449
450 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
451 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
452 }
453
454 /*****************************************************************************
455 *
456 * sysfs attributes
457 *
458 *****************************************************************************/
459
460 #ifdef CONFIG_IWLWIFI_DEBUG
461
462 /*
463 * The following adds a new attribute to the sysfs representation
464 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
465 * used for controlling the debug level.
466 *
467 * See the level definitions in iwl for details.
468 *
469 * The debug_level being managed using sysfs below is a per device debug
470 * level that is used instead of the global debug level if it (the per
471 * device debug level) is set.
472 */
473 static ssize_t show_debug_level(struct device *d,
474 struct device_attribute *attr, char *buf)
475 {
476 struct iwl_shared *shrd = dev_get_drvdata(d);
477 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(shrd->priv));
478 }
479 static ssize_t store_debug_level(struct device *d,
480 struct device_attribute *attr,
481 const char *buf, size_t count)
482 {
483 struct iwl_shared *shrd = dev_get_drvdata(d);
484 struct iwl_priv *priv = shrd->priv;
485 unsigned long val;
486 int ret;
487
488 ret = strict_strtoul(buf, 0, &val);
489 if (ret)
490 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
491 else {
492 priv->debug_level = val;
493 if (iwl_alloc_traffic_mem(priv))
494 IWL_ERR(priv,
495 "Not enough memory to generate traffic log\n");
496 }
497 return strnlen(buf, count);
498 }
499
500 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
501 show_debug_level, store_debug_level);
502
503
504 #endif /* CONFIG_IWLWIFI_DEBUG */
505
506
507 static ssize_t show_temperature(struct device *d,
508 struct device_attribute *attr, char *buf)
509 {
510 struct iwl_shared *shrd = dev_get_drvdata(d);
511 struct iwl_priv *priv = shrd->priv;
512
513 if (!iwl_is_alive(priv))
514 return -EAGAIN;
515
516 return sprintf(buf, "%d\n", priv->temperature);
517 }
518
519 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
520
521 static ssize_t show_tx_power(struct device *d,
522 struct device_attribute *attr, char *buf)
523 {
524 struct iwl_priv *priv = dev_get_drvdata(d);
525
526 if (!iwl_is_ready_rf(priv))
527 return sprintf(buf, "off\n");
528 else
529 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
530 }
531
532 static ssize_t store_tx_power(struct device *d,
533 struct device_attribute *attr,
534 const char *buf, size_t count)
535 {
536 struct iwl_priv *priv = dev_get_drvdata(d);
537 unsigned long val;
538 int ret;
539
540 ret = strict_strtoul(buf, 10, &val);
541 if (ret)
542 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
543 else {
544 ret = iwl_set_tx_power(priv, val, false);
545 if (ret)
546 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
547 ret);
548 else
549 ret = count;
550 }
551 return ret;
552 }
553
554 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
555
556 static struct attribute *iwl_sysfs_entries[] = {
557 &dev_attr_temperature.attr,
558 &dev_attr_tx_power.attr,
559 #ifdef CONFIG_IWLWIFI_DEBUG
560 &dev_attr_debug_level.attr,
561 #endif
562 NULL
563 };
564
565 static struct attribute_group iwl_attribute_group = {
566 .name = NULL, /* put in device directory */
567 .attrs = iwl_sysfs_entries,
568 };
569
570 /******************************************************************************
571 *
572 * uCode download functions
573 *
574 ******************************************************************************/
575
576 static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
577 {
578 if (desc->v_addr)
579 dma_free_coherent(priv->bus->dev, desc->len,
580 desc->v_addr, desc->p_addr);
581 desc->v_addr = NULL;
582 desc->len = 0;
583 }
584
585 static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
586 {
587 iwl_free_fw_desc(priv, &img->code);
588 iwl_free_fw_desc(priv, &img->data);
589 }
590
591 static void iwl_dealloc_ucode(struct iwl_priv *priv)
592 {
593 iwl_free_fw_img(priv, &priv->ucode_rt);
594 iwl_free_fw_img(priv, &priv->ucode_init);
595 iwl_free_fw_img(priv, &priv->ucode_wowlan);
596 }
597
598 static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
599 const void *data, size_t len)
600 {
601 if (!len) {
602 desc->v_addr = NULL;
603 return -EINVAL;
604 }
605
606 desc->v_addr = dma_alloc_coherent(priv->bus->dev, len,
607 &desc->p_addr, GFP_KERNEL);
608 if (!desc->v_addr)
609 return -ENOMEM;
610
611 desc->len = len;
612 memcpy(desc->v_addr, data, len);
613 return 0;
614 }
615
616 static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
617 {
618 static const u8 iwlagn_bss_ac_to_fifo[] = {
619 IWL_TX_FIFO_VO,
620 IWL_TX_FIFO_VI,
621 IWL_TX_FIFO_BE,
622 IWL_TX_FIFO_BK,
623 };
624 static const u8 iwlagn_bss_ac_to_queue[] = {
625 0, 1, 2, 3,
626 };
627 static const u8 iwlagn_pan_ac_to_fifo[] = {
628 IWL_TX_FIFO_VO_IPAN,
629 IWL_TX_FIFO_VI_IPAN,
630 IWL_TX_FIFO_BE_IPAN,
631 IWL_TX_FIFO_BK_IPAN,
632 };
633 static const u8 iwlagn_pan_ac_to_queue[] = {
634 7, 6, 5, 4,
635 };
636 int i;
637
638 /*
639 * The default context is always valid,
640 * the PAN context depends on uCode.
641 */
642 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
643 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
644 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
645
646 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
647 priv->contexts[i].ctxid = i;
648
649 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
650 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
651 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
652 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
653 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
654 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
655 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
656 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
657 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
658 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
659 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
660 BIT(NL80211_IFTYPE_ADHOC);
661 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
662 BIT(NL80211_IFTYPE_STATION);
663 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
664 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
665 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
666 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
667
668 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
669 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
670 REPLY_WIPAN_RXON_TIMING;
671 priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
672 REPLY_WIPAN_RXON_ASSOC;
673 priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
674 priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
675 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
676 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
677 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
678 priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
679 priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
680 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
681 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
682 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
683
684 if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
685 priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
686 BIT(NL80211_IFTYPE_P2P_CLIENT) |
687 BIT(NL80211_IFTYPE_P2P_GO);
688
689 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
690 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
691 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
692
693 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
694 }
695
696
697 struct iwlagn_ucode_capabilities {
698 u32 max_probe_length;
699 u32 standard_phy_calibration_size;
700 u32 flags;
701 };
702
703 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
704 static int iwl_mac_setup_register(struct iwl_priv *priv,
705 struct iwlagn_ucode_capabilities *capa);
706
707 #define UCODE_EXPERIMENTAL_INDEX 100
708 #define UCODE_EXPERIMENTAL_TAG "exp"
709
710 static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
711 {
712 const char *name_pre = priv->cfg->fw_name_pre;
713 char tag[8];
714
715 if (first) {
716 #ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
717 priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
718 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
719 } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
720 #endif
721 priv->fw_index = priv->cfg->ucode_api_max;
722 sprintf(tag, "%d", priv->fw_index);
723 } else {
724 priv->fw_index--;
725 sprintf(tag, "%d", priv->fw_index);
726 }
727
728 if (priv->fw_index < priv->cfg->ucode_api_min) {
729 IWL_ERR(priv, "no suitable firmware found!\n");
730 return -ENOENT;
731 }
732
733 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
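/* e.g. with a hypothetical fw_name_pre of "iwlwifi-6000-" and API tag "5"
 * this requests the file "iwlwifi-6000-5.ucode" */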
734
735 IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
736 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
737 ? "EXPERIMENTAL " : "",
738 priv->firmware_name);
739
740 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
741 priv->bus->dev,
742 GFP_KERNEL, priv, iwl_ucode_callback);
743 }
744
745 struct iwlagn_firmware_pieces {
746 const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
747 size_t inst_size, data_size, init_size, init_data_size,
748 wowlan_inst_size, wowlan_data_size;
749
750 u32 build;
751
752 u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
753 u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
754 };
755
756 static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
757 const struct firmware *ucode_raw,
758 struct iwlagn_firmware_pieces *pieces)
759 {
760 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
761 u32 api_ver, hdr_size;
762 const u8 *src;
763
764 priv->ucode_ver = le32_to_cpu(ucode->ver);
765 api_ver = IWL_UCODE_API(priv->ucode_ver);
766
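/* The header layout depends on the API version: v0-v2 use the 24-byte
 * v1 header without a build number, anything newer uses the 28-byte
 * v2 header that adds the build field. */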
767 switch (api_ver) {
768 default:
769 hdr_size = 28;
770 if (ucode_raw->size < hdr_size) {
771 IWL_ERR(priv, "File size too small!\n");
772 return -EINVAL;
773 }
774 pieces->build = le32_to_cpu(ucode->u.v2.build);
775 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
776 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
777 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
778 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
779 src = ucode->u.v2.data;
780 break;
781 case 0:
782 case 1:
783 case 2:
784 hdr_size = 24;
785 if (ucode_raw->size < hdr_size) {
786 IWL_ERR(priv, "File size too small!\n");
787 return -EINVAL;
788 }
789 pieces->build = 0;
790 pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
791 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
792 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
793 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
794 src = ucode->u.v1.data;
795 break;
796 }
797
798 /* Verify size of file vs. image size info in file's header */
799 if (ucode_raw->size != hdr_size + pieces->inst_size +
800 pieces->data_size + pieces->init_size +
801 pieces->init_data_size) {
802
803 IWL_ERR(priv,
804 "uCode file size %d does not match expected size\n",
805 (int)ucode_raw->size);
806 return -EINVAL;
807 }
808
809 pieces->inst = src;
810 src += pieces->inst_size;
811 pieces->data = src;
812 src += pieces->data_size;
813 pieces->init = src;
814 src += pieces->init_size;
815 pieces->init_data = src;
816 src += pieces->init_data_size;
817
818 return 0;
819 }
820
821 static int iwlagn_load_firmware(struct iwl_priv *priv,
822 const struct firmware *ucode_raw,
823 struct iwlagn_firmware_pieces *pieces,
824 struct iwlagn_ucode_capabilities *capa)
825 {
826 struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
827 struct iwl_ucode_tlv *tlv;
828 size_t len = ucode_raw->size;
829 const u8 *data;
830 int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
831 int tmp;
832 u64 alternatives;
833 u32 tlv_len;
834 enum iwl_ucode_tlv_type tlv_type;
835 const u8 *tlv_data;
836
837 if (len < sizeof(*ucode)) {
838 IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
839 return -EINVAL;
840 }
841
842 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
843 IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
844 le32_to_cpu(ucode->magic));
845 return -EINVAL;
846 }
847
848 /*
849 * Check which alternatives are present, and "downgrade"
850 * when the chosen alternative is not present, warning
851 * the user when that happens. Some files may not have
852 * any alternatives, so don't warn in that case.
853 */
854 alternatives = le64_to_cpu(ucode->alternatives);
855 tmp = wanted_alternative;
856 if (wanted_alternative > 63)
857 wanted_alternative = 63;
858 while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
859 wanted_alternative--;
860 if (wanted_alternative && wanted_alternative != tmp)
861 IWL_WARN(priv,
862 "uCode alternative %d not available, choosing %d\n",
863 tmp, wanted_alternative);
864
865 priv->ucode_ver = le32_to_cpu(ucode->ver);
866 pieces->build = le32_to_cpu(ucode->build);
867 data = ucode->data;
868
869 len -= sizeof(*ucode);
870
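/* Walk the TLV list: every entry carries a type, an alternative id and a
 * length, followed by 'length' bytes of payload padded to a 4-byte boundary. */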
871 while (len >= sizeof(*tlv)) {
872 u16 tlv_alt;
873
874 len -= sizeof(*tlv);
875 tlv = (void *)data;
876
877 tlv_len = le32_to_cpu(tlv->length);
878 tlv_type = le16_to_cpu(tlv->type);
879 tlv_alt = le16_to_cpu(tlv->alternative);
880 tlv_data = tlv->data;
881
882 if (len < tlv_len) {
883 IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
884 len, tlv_len);
885 return -EINVAL;
886 }
887 len -= ALIGN(tlv_len, 4);
888 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
889
890 /*
891 * Alternative 0 is always valid.
892 *
893 * Skip alternative TLVs that are not selected.
894 */
895 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
896 continue;
897
898 switch (tlv_type) {
899 case IWL_UCODE_TLV_INST:
900 pieces->inst = tlv_data;
901 pieces->inst_size = tlv_len;
902 break;
903 case IWL_UCODE_TLV_DATA:
904 pieces->data = tlv_data;
905 pieces->data_size = tlv_len;
906 break;
907 case IWL_UCODE_TLV_INIT:
908 pieces->init = tlv_data;
909 pieces->init_size = tlv_len;
910 break;
911 case IWL_UCODE_TLV_INIT_DATA:
912 pieces->init_data = tlv_data;
913 pieces->init_data_size = tlv_len;
914 break;
915 case IWL_UCODE_TLV_BOOT:
916 IWL_ERR(priv, "Found unexpected BOOT ucode\n");
917 break;
918 case IWL_UCODE_TLV_PROBE_MAX_LEN:
919 if (tlv_len != sizeof(u32))
920 goto invalid_tlv_len;
921 capa->max_probe_length =
922 le32_to_cpup((__le32 *)tlv_data);
923 break;
924 case IWL_UCODE_TLV_PAN:
925 if (tlv_len)
926 goto invalid_tlv_len;
927 capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
928 break;
929 case IWL_UCODE_TLV_FLAGS:
930 /* must be at least one u32 */
931 if (tlv_len < sizeof(u32))
932 goto invalid_tlv_len;
933 /* and a proper number of u32s */
934 if (tlv_len % sizeof(u32))
935 goto invalid_tlv_len;
936 /*
937 * This driver only reads the first u32 as
938 * right now no more features are defined,
939 * if that changes then either the driver
940 * will not work with the new firmware, or
941 * it'll not take advantage of new features.
942 */
943 capa->flags = le32_to_cpup((__le32 *)tlv_data);
944 break;
945 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
946 if (tlv_len != sizeof(u32))
947 goto invalid_tlv_len;
948 pieces->init_evtlog_ptr =
949 le32_to_cpup((__le32 *)tlv_data);
950 break;
951 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
952 if (tlv_len != sizeof(u32))
953 goto invalid_tlv_len;
954 pieces->init_evtlog_size =
955 le32_to_cpup((__le32 *)tlv_data);
956 break;
957 case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
958 if (tlv_len != sizeof(u32))
959 goto invalid_tlv_len;
960 pieces->init_errlog_ptr =
961 le32_to_cpup((__le32 *)tlv_data);
962 break;
963 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
964 if (tlv_len != sizeof(u32))
965 goto invalid_tlv_len;
966 pieces->inst_evtlog_ptr =
967 le32_to_cpup((__le32 *)tlv_data);
968 break;
969 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
970 if (tlv_len != sizeof(u32))
971 goto invalid_tlv_len;
972 pieces->inst_evtlog_size =
973 le32_to_cpup((__le32 *)tlv_data);
974 break;
975 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
976 if (tlv_len != sizeof(u32))
977 goto invalid_tlv_len;
978 pieces->inst_errlog_ptr =
979 le32_to_cpup((__le32 *)tlv_data);
980 break;
981 case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
982 if (tlv_len)
983 goto invalid_tlv_len;
984 priv->enhance_sensitivity_table = true;
985 break;
986 case IWL_UCODE_TLV_WOWLAN_INST:
987 pieces->wowlan_inst = tlv_data;
988 pieces->wowlan_inst_size = tlv_len;
989 break;
990 case IWL_UCODE_TLV_WOWLAN_DATA:
991 pieces->wowlan_data = tlv_data;
992 pieces->wowlan_data_size = tlv_len;
993 break;
994 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
995 if (tlv_len != sizeof(u32))
996 goto invalid_tlv_len;
997 capa->standard_phy_calibration_size =
998 le32_to_cpup((__le32 *)tlv_data);
999 break;
1000 default:
1001 IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
1002 break;
1003 }
1004 }
1005
1006 if (len) {
1007 IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
1008 iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
1009 return -EINVAL;
1010 }
1011
1012 return 0;
1013
1014 invalid_tlv_len:
1015 IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
1016 iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
1017
1018 return -EINVAL;
1019 }
1020
1021 /**
1022 * iwl_ucode_callback - callback when firmware was loaded
1023 *
1024 * If loaded successfully, copies the firmware into buffers
1025 * for the card to fetch (via DMA).
1026 */
1027 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1028 {
1029 struct iwl_priv *priv = context;
1030 struct iwl_ucode_header *ucode;
1031 int err;
1032 struct iwlagn_firmware_pieces pieces;
1033 const unsigned int api_max = priv->cfg->ucode_api_max;
1034 unsigned int api_ok = priv->cfg->ucode_api_ok;
1035 const unsigned int api_min = priv->cfg->ucode_api_min;
1036 u32 api_ver;
1037 char buildstr[25];
1038 u32 build;
1039 struct iwlagn_ucode_capabilities ucode_capa = {
1040 .max_probe_length = 200,
1041 .standard_phy_calibration_size =
1042 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
1043 };
1044
1045 if (!api_ok)
1046 api_ok = api_max;
1047
1048 memset(&pieces, 0, sizeof(pieces));
1049
1050 if (!ucode_raw) {
1051 if (priv->fw_index <= api_ok)
1052 IWL_ERR(priv,
1053 "request for firmware file '%s' failed.\n",
1054 priv->firmware_name);
1055 goto try_again;
1056 }
1057
1058 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1059 priv->firmware_name, ucode_raw->size);
1060
1061 /* Make sure that we got at least the API version number */
1062 if (ucode_raw->size < 4) {
1063 IWL_ERR(priv, "File size way too small!\n");
1064 goto try_again;
1065 }
1066
1067 /* Data from ucode file: header followed by uCode images */
1068 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1069
1070 if (ucode->ver)
1071 err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
1072 else
1073 err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
1074 &ucode_capa);
1075
1076 if (err)
1077 goto try_again;
1078
1079 api_ver = IWL_UCODE_API(priv->ucode_ver);
1080 build = pieces.build;
1081
1082 /*
1083 * api_ver should match the api version forming part of the
1084 * firmware filename ... but we don't check for that and only rely
1085 * on the API version read from firmware header from here on forward
1086 */
1087 /* no api version check required for experimental uCode */
1088 if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
1089 if (api_ver < api_min || api_ver > api_max) {
1090 IWL_ERR(priv,
1091 "Driver unable to support your firmware API. "
1092 "Driver supports v%u, firmware is v%u.\n",
1093 api_max, api_ver);
1094 goto try_again;
1095 }
1096
1097 if (api_ver < api_ok) {
1098 if (api_ok != api_max)
1099 IWL_ERR(priv, "Firmware has old API version, "
1100 "expected v%u through v%u, got v%u.\n",
1101 api_ok, api_max, api_ver);
1102 else
1103 IWL_ERR(priv, "Firmware has old API version, "
1104 "expected v%u, got v%u.\n",
1105 api_max, api_ver);
1106 IWL_ERR(priv, "New firmware can be obtained from "
1107 "http://www.intellinuxwireless.org/.\n");
1108 }
1109 }
1110
1111 if (build)
1112 sprintf(buildstr, " build %u%s", build,
1113 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1114 ? " (EXP)" : "");
1115 else
1116 buildstr[0] = '\0';
1117
1118 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
1119 IWL_UCODE_MAJOR(priv->ucode_ver),
1120 IWL_UCODE_MINOR(priv->ucode_ver),
1121 IWL_UCODE_API(priv->ucode_ver),
1122 IWL_UCODE_SERIAL(priv->ucode_ver),
1123 buildstr);
1124
1125 snprintf(priv->hw->wiphy->fw_version,
1126 sizeof(priv->hw->wiphy->fw_version),
1127 "%u.%u.%u.%u%s",
1128 IWL_UCODE_MAJOR(priv->ucode_ver),
1129 IWL_UCODE_MINOR(priv->ucode_ver),
1130 IWL_UCODE_API(priv->ucode_ver),
1131 IWL_UCODE_SERIAL(priv->ucode_ver),
1132 buildstr);
1133
1134 /*
1135 * For any of the failures below (before allocating pci memory)
1136 * we will try to load a version with a smaller API -- maybe the
1137 * user just got a corrupted version of the latest API.
1138 */
1139
1140 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1141 priv->ucode_ver);
1142 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1143 pieces.inst_size);
1144 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1145 pieces.data_size);
1146 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1147 pieces.init_size);
1148 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1149 pieces.init_data_size);
1150
1151 /* Verify that uCode images will fit in card's SRAM */
1152 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1153 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1154 pieces.inst_size);
1155 goto try_again;
1156 }
1157
1158 if (pieces.data_size > priv->hw_params.max_data_size) {
1159 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1160 pieces.data_size);
1161 goto try_again;
1162 }
1163
1164 if (pieces.init_size > priv->hw_params.max_inst_size) {
1165 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1166 pieces.init_size);
1167 goto try_again;
1168 }
1169
1170 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1171 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1172 pieces.init_data_size);
1173 goto try_again;
1174 }
1175
1176 /* Allocate ucode buffers for card's bus-master loading ... */
1177
1178 /* Runtime instructions and 2 copies of data:
1179 * 1) unmodified from disk
1180 * 2) backup cache for save/restore during power-downs */
1181 if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
1182 pieces.inst, pieces.inst_size))
1183 goto err_pci_alloc;
1184 if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
1185 pieces.data, pieces.data_size))
1186 goto err_pci_alloc;
1187
1188 /* Initialization instructions and data */
1189 if (pieces.init_size && pieces.init_data_size) {
1190 if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
1191 pieces.init, pieces.init_size))
1192 goto err_pci_alloc;
1193 if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
1194 pieces.init_data, pieces.init_data_size))
1195 goto err_pci_alloc;
1196 }
1197
1198 /* WoWLAN instructions and data */
1199 if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
1200 if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
1201 pieces.wowlan_inst,
1202 pieces.wowlan_inst_size))
1203 goto err_pci_alloc;
1204 if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
1205 pieces.wowlan_data,
1206 pieces.wowlan_data_size))
1207 goto err_pci_alloc;
1208 }
1209
1210 /* Now that we can no longer fail, copy information */
1211
1212 /*
1213 * The (size - 16) / 12 formula is based on the information recorded
1214 * for each event, which is of mode 1 (including timestamp) for all
1215 * new microcodes that include this information.
1216 */
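/* 16 bytes is the four-u32 log header (capacity, mode, wrap count, write
 * pointer) and 12 bytes is one mode-1 entry (event id, timestamp, data),
 * so the division yields the capacity in entries. */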
1217 priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
1218 if (pieces.init_evtlog_size)
1219 priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
1220 else
1221 priv->init_evtlog_size =
1222 priv->cfg->base_params->max_event_log_size;
1223 priv->init_errlog_ptr = pieces.init_errlog_ptr;
1224 priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
1225 if (pieces.inst_evtlog_size)
1226 priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
1227 else
1228 priv->inst_evtlog_size =
1229 priv->cfg->base_params->max_event_log_size;
1230 priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
1231
1232 priv->new_scan_threshold_behaviour =
1233 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
1234
1235 if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
1236 ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1237
1238 /*
1239 * if not PAN, then don't support P2P -- might be a uCode
1240 * packaging bug or due to the eeprom check above
1241 */
1242 if (!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN))
1243 ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1244
1245 if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
1246 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1247 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1248 } else {
1249 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1250 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1251 }
1252
1253 /*
1254 * figure out the offset of the chain noise reset and gain commands
1255 * based on the size of the standard phy calibration commands table
1256 */
1257 if (ucode_capa.standard_phy_calibration_size >
1258 IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
1259 ucode_capa.standard_phy_calibration_size =
1260 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1261
1262 priv->phy_calib_chain_noise_reset_cmd =
1263 ucode_capa.standard_phy_calibration_size;
1264 priv->phy_calib_chain_noise_gain_cmd =
1265 ucode_capa.standard_phy_calibration_size + 1;
1266
1267 /* initialize all valid contexts */
1268 iwl_init_context(priv, ucode_capa.flags);
1269
1270 /**************************************************
1271 * This is still part of probe() in a sense...
1272 *
1273 * 9. Setup and register with mac80211 and debugfs
1274 **************************************************/
1275 err = iwl_mac_setup_register(priv, &ucode_capa);
1276 if (err)
1277 goto out_unbind;
1278
1279 err = iwl_dbgfs_register(priv, DRV_NAME);
1280 if (err)
1281 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
1282
1283 err = sysfs_create_group(&(priv->bus->dev->kobj),
1284 &iwl_attribute_group);
1285 if (err) {
1286 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1287 goto out_unbind;
1288 }
1289
1290 /* We have our copies now, allow OS to release its copies */
1291 release_firmware(ucode_raw);
1292 complete(&priv->firmware_loading_complete);
1293 return;
1294
1295 try_again:
1296 /* try next, if any */
1297 if (iwl_request_firmware(priv, false))
1298 goto out_unbind;
1299 release_firmware(ucode_raw);
1300 return;
1301
1302 err_pci_alloc:
1303 IWL_ERR(priv, "failed to allocate pci memory\n");
1304 iwl_dealloc_ucode(priv);
1305 out_unbind:
1306 complete(&priv->firmware_loading_complete);
1307 device_release_driver(priv->bus->dev);
1308 release_firmware(ucode_raw);
1309 }
1310
1311 static const char * const desc_lookup_text[] = {
1312 "OK",
1313 "FAIL",
1314 "BAD_PARAM",
1315 "BAD_CHECKSUM",
1316 "NMI_INTERRUPT_WDG",
1317 "SYSASSERT",
1318 "FATAL_ERROR",
1319 "BAD_COMMAND",
1320 "HW_ERROR_TUNE_LOCK",
1321 "HW_ERROR_TEMPERATURE",
1322 "ILLEGAL_CHAN_FREQ",
1323 "VCC_NOT_STABLE",
1324 "FH_ERROR",
1325 "NMI_INTERRUPT_HOST",
1326 "NMI_INTERRUPT_ACTION_PT",
1327 "NMI_INTERRUPT_UNKNOWN",
1328 "UCODE_VERSION_MISMATCH",
1329 "HW_ERROR_ABS_LOCK",
1330 "HW_ERROR_CAL_LOCK_FAIL",
1331 "NMI_INTERRUPT_INST_ACTION_PT",
1332 "NMI_INTERRUPT_DATA_ACTION_PT",
1333 "NMI_TRM_HW_ER",
1334 "NMI_INTERRUPT_TRM",
1335 "NMI_INTERRUPT_BREAK_POINT",
1336 "DEBUG_0",
1337 "DEBUG_1",
1338 "DEBUG_2",
1339 "DEBUG_3",
1340 };
1341
1342 static struct { char *name; u8 num; } advanced_lookup[] = {
1343 { "NMI_INTERRUPT_WDG", 0x34 },
1344 { "SYSASSERT", 0x35 },
1345 { "UCODE_VERSION_MISMATCH", 0x37 },
1346 { "BAD_COMMAND", 0x38 },
1347 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1348 { "FATAL_ERROR", 0x3D },
1349 { "NMI_TRM_HW_ERR", 0x46 },
1350 { "NMI_INTERRUPT_TRM", 0x4C },
1351 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1352 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1353 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1354 { "NMI_INTERRUPT_HOST", 0x66 },
1355 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1356 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1357 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1358 { "ADVANCED_SYSASSERT", 0 },
1359 };
1360
1361 static const char *desc_lookup(u32 num)
1362 {
1363 int i;
1364 int max = ARRAY_SIZE(desc_lookup_text);
1365
1366 if (num < max)
1367 return desc_lookup_text[num];
1368
1369 max = ARRAY_SIZE(advanced_lookup) - 1;
1370 for (i = 0; i < max; i++) {
1371 if (advanced_lookup[i].num == num)
1372 break;
1373 }
1374 return advanced_lookup[i].name;
1375 }
1376
1377 #define ERROR_START_OFFSET (1 * sizeof(u32))
1378 #define ERROR_ELEM_SIZE (7 * sizeof(u32))
1379
1380 void iwl_dump_nic_error_log(struct iwl_priv *priv)
1381 {
1382 u32 base;
1383 struct iwl_error_event_table table;
1384
1385 base = priv->device_pointers.error_event_table;
1386 if (priv->ucode_type == IWL_UCODE_INIT) {
1387 if (!base)
1388 base = priv->init_errlog_ptr;
1389 } else {
1390 if (!base)
1391 base = priv->inst_errlog_ptr;
1392 }
1393
1394 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1395 IWL_ERR(priv,
1396 "Not valid error log pointer 0x%08X for %s uCode\n",
1397 base,
1398 (priv->ucode_type == IWL_UCODE_INIT)
1399 ? "Init" : "RT");
1400 return;
1401 }
1402
1403 iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1404
1405 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1406 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1407 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1408 priv->status, table.valid);
1409 }
1410
1411 priv->isr_stats.err_code = table.error_id;
1412
1413 trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
1414 table.data1, table.data2, table.line,
1415 table.blink1, table.blink2, table.ilink1,
1416 table.ilink2, table.bcon_time, table.gp1,
1417 table.gp2, table.gp3, table.ucode_ver,
1418 table.hw_ver, table.brd_ver);
1419 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1420 desc_lookup(table.error_id));
1421 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1422 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1423 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1424 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1425 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1426 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1427 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1428 IWL_ERR(priv, "0x%08X | line\n", table.line);
1429 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1430 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1431 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1432 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1433 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1434 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1435 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1436 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1437 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1438 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1439 }
1440
1441 #define EVENT_START_OFFSET (4 * sizeof(u32))
1442
1443 /**
1444 * iwl_print_event_log - Dump error event log to syslog
1445 *
1446 */
1447 static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1448 u32 num_events, u32 mode,
1449 int pos, char **buf, size_t bufsz)
1450 {
1451 u32 i;
1452 u32 base; /* SRAM byte address of event log header */
1453 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1454 u32 ptr; /* SRAM byte address of log data */
1455 u32 ev, time, data; /* event log data */
1456 unsigned long reg_flags;
1457
1458 if (num_events == 0)
1459 return pos;
1460
1461 base = priv->device_pointers.log_event_table;
1462 if (priv->ucode_type == IWL_UCODE_INIT) {
1463 if (!base)
1464 base = priv->init_evtlog_ptr;
1465 } else {
1466 if (!base)
1467 base = priv->inst_evtlog_ptr;
1468 }
1469
1470 if (mode == 0)
1471 event_size = 2 * sizeof(u32);
1472 else
1473 event_size = 3 * sizeof(u32);
1474
1475 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1476
1477 /* Make sure device is powered up for SRAM reads */
1478 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1479 iwl_grab_nic_access(priv);
1480
1481 /* Set starting address; reads will auto-increment */
1482 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
1483 rmb();
1484
1485 /* "time" is actually "data" for mode 0 (no timestamp).
1486 * place event id # at far right for easier visual parsing. */
1487 for (i = 0; i < num_events; i++) {
1488 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1489 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1490 if (mode == 0) {
1491 /* data, ev */
1492 if (bufsz) {
1493 pos += scnprintf(*buf + pos, bufsz - pos,
1494 "EVT_LOG:0x%08x:%04u\n",
1495 time, ev);
1496 } else {
1497 trace_iwlwifi_dev_ucode_event(priv, 0,
1498 time, ev);
1499 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1500 time, ev);
1501 }
1502 } else {
1503 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1504 if (bufsz) {
1505 pos += scnprintf(*buf + pos, bufsz - pos,
1506 "EVT_LOGT:%010u:0x%08x:%04u\n",
1507 time, data, ev);
1508 } else {
1509 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1510 time, data, ev);
1511 trace_iwlwifi_dev_ucode_event(priv, time,
1512 data, ev);
1513 }
1514 }
1515 }
1516
1517 /* Allow device to power down */
1518 iwl_release_nic_access(priv);
1519 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1520 return pos;
1521 }
1522
1523 /**
1524 * iwl_print_last_event_logs - Dump the newest event log entries to syslog
1525 */
1526 static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1527 u32 num_wraps, u32 next_entry,
1528 u32 size, u32 mode,
1529 int pos, char **buf, size_t bufsz)
1530 {
1531 /*
1532 * display the newest DEFAULT_LOG_ENTRIES entries,
1533 * i.e. the entries just before the next one that uCode would fill.
1534 */
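/* When the log has wrapped, the newest 'size' entries may straddle the end
 * of the buffer: dump the tail of the buffer first, then continue from 0. */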
1535 if (num_wraps) {
1536 if (next_entry < size) {
1537 pos = iwl_print_event_log(priv,
1538 capacity - (size - next_entry),
1539 size - next_entry, mode,
1540 pos, buf, bufsz);
1541 pos = iwl_print_event_log(priv, 0,
1542 next_entry, mode,
1543 pos, buf, bufsz);
1544 } else
1545 pos = iwl_print_event_log(priv, next_entry - size,
1546 size, mode, pos, buf, bufsz);
1547 } else {
1548 if (next_entry < size) {
1549 pos = iwl_print_event_log(priv, 0, next_entry,
1550 mode, pos, buf, bufsz);
1551 } else {
1552 pos = iwl_print_event_log(priv, next_entry - size,
1553 size, mode, pos, buf, bufsz);
1554 }
1555 }
1556 return pos;
1557 }
1558
1559 #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1560
1561 int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1562 char **buf, bool display)
1563 {
1564 u32 base; /* SRAM byte address of event log header */
1565 u32 capacity; /* event log capacity in # entries */
1566 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1567 u32 num_wraps; /* # times uCode wrapped to top of log */
1568 u32 next_entry; /* index of next entry to be written by uCode */
1569 u32 size; /* # entries that we'll print */
1570 u32 logsize;
1571 int pos = 0;
1572 size_t bufsz = 0;
1573
1574 base = priv->device_pointers.log_event_table;
1575 if (priv->ucode_type == IWL_UCODE_INIT) {
1576 logsize = priv->init_evtlog_size;
1577 if (!base)
1578 base = priv->init_evtlog_ptr;
1579 } else {
1580 logsize = priv->inst_evtlog_size;
1581 if (!base)
1582 base = priv->inst_evtlog_ptr;
1583 }
1584
1585 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1586 IWL_ERR(priv,
1587 "Invalid event log pointer 0x%08X for %s uCode\n",
1588 base,
1589 (priv->ucode_type == IWL_UCODE_INIT)
1590 ? "Init" : "RT");
1591 return -EINVAL;
1592 }
1593
1594 /* event log header */
1595 capacity = iwl_read_targ_mem(priv, base);
1596 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1597 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1598 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1599
1600 if (capacity > logsize) {
1601 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1602 capacity, logsize);
1603 capacity = logsize;
1604 }
1605
1606 if (next_entry > logsize) {
1607 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1608 next_entry, logsize);
1609 next_entry = logsize;
1610 }
1611
1612 size = num_wraps ? capacity : next_entry;
1613
1614 /* bail out if nothing in log */
1615 if (size == 0) {
1616 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1617 return pos;
1618 }
1619
1620 /* enable/disable bt channel inhibition */
1621 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
1622
1623 #ifdef CONFIG_IWLWIFI_DEBUG
1624 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1625 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1626 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1627 #else
1628 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1629 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1630 #endif
1631 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1632 size);
1633
1634 #ifdef CONFIG_IWLWIFI_DEBUG
1635 if (display) {
1636 if (full_log)
1637 bufsz = capacity * 48;
1638 else
1639 bufsz = size * 48;
1640 *buf = kmalloc(bufsz, GFP_KERNEL);
1641 if (!*buf)
1642 return -ENOMEM;
1643 }
1644 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1645 /*
1646 * if uCode has wrapped back to top of log,
1647 * start at the oldest entry,
1648 * i.e. the next one that uCode would fill.
1649 */
1650 if (num_wraps)
1651 pos = iwl_print_event_log(priv, next_entry,
1652 capacity - next_entry, mode,
1653 pos, buf, bufsz);
1654 /* (then/else) start at top of log */
1655 pos = iwl_print_event_log(priv, 0,
1656 next_entry, mode, pos, buf, bufsz);
1657 } else
1658 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1659 next_entry, size, mode,
1660 pos, buf, bufsz);
1661 #else
1662 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1663 next_entry, size, mode,
1664 pos, buf, bufsz);
1665 #endif
1666 return pos;
1667 }
1668
1669 static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1670 {
1671 struct iwl_ct_kill_config cmd;
1672 struct iwl_ct_kill_throttling_config adv_cmd;
1673 unsigned long flags;
1674 int ret = 0;
1675
1676 spin_lock_irqsave(&priv->lock, flags);
1677 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1678 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1679 spin_unlock_irqrestore(&priv->lock, flags);
1680 priv->thermal_throttle.ct_kill_toggle = false;
1681
1682 if (priv->cfg->base_params->support_ct_kill_exit) {
1683 adv_cmd.critical_temperature_enter =
1684 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1685 adv_cmd.critical_temperature_exit =
1686 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1687
1688 ret = trans_send_cmd_pdu(&priv->trans,
1689 REPLY_CT_KILL_CONFIG_CMD,
1690 CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
1691 if (ret)
1692 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1693 else
1694 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1695 "succeeded, "
1696 "critical temperature enter is %d,"
1697 "exit is %d\n",
1698 priv->hw_params.ct_kill_threshold,
1699 priv->hw_params.ct_kill_exit_threshold);
1700 } else {
1701 cmd.critical_temperature_R =
1702 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1703
1704 ret = trans_send_cmd_pdu(&priv->trans,
1705 REPLY_CT_KILL_CONFIG_CMD,
1706 CMD_SYNC, sizeof(cmd), &cmd);
1707 if (ret)
1708 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1709 else
1710 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1711 "succeeded, "
1712 "critical temperature is %d\n",
1713 priv->hw_params.ct_kill_threshold);
1714 }
1715 }
1716
1717 static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
1718 {
1719 struct iwl_calib_cfg_cmd calib_cfg_cmd;
1720 struct iwl_host_cmd cmd = {
1721 .id = CALIBRATION_CFG_CMD,
1722 .len = { sizeof(struct iwl_calib_cfg_cmd), },
1723 .data = { &calib_cfg_cmd, },
1724 };
1725
1726 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
1727 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
1728 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
1729
1730 return trans_send_cmd(&priv->trans, &cmd);
1731 }
1732
1733
1734 static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1735 {
1736 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1737 .valid = cpu_to_le32(valid_tx_ant),
1738 };
1739
1740 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1741 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1742 return trans_send_cmd_pdu(&priv->trans,
1743 TX_ANT_CONFIGURATION_CMD,
1744 CMD_SYNC,
1745 sizeof(struct iwl_tx_ant_config_cmd),
1746 &tx_ant_cmd);
1747 } else {
1748 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1749 return -EOPNOTSUPP;
1750 }
1751 }
1752
1753 /**
1754 * iwl_alive_start - called after REPLY_ALIVE notification received
1755 * from protocol/runtime uCode (initialization uCode's
1756 * Alive gets handled by iwl_init_alive_start()).
1757 */
1758 int iwl_alive_start(struct iwl_priv *priv)
1759 {
1760 int ret = 0;
1761 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1762
1763 /* TODO: this should go to the transport layer */
1764 iwl_reset_ict(priv);
1765
1766 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1767
1768 /* After the ALIVE response, we can send host commands to the uCode */
1769 set_bit(STATUS_ALIVE, &priv->status);
1770
1771 /* Enable watchdog to monitor the driver tx queues */
1772 iwl_setup_watchdog(priv);
1773
1774 if (iwl_is_rfkill(priv))
1775 return -ERFKILL;
1776
1777 /* download priority table before any calibration request */
1778 if (priv->cfg->bt_params &&
1779 priv->cfg->bt_params->advanced_bt_coexist) {
1780 /* Configure Bluetooth device coexistence support */
1781 if (priv->cfg->bt_params->bt_sco_disable)
1782 priv->bt_enable_pspoll = false;
1783 else
1784 priv->bt_enable_pspoll = true;
1785
1786 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
1787 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1788 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1789 iwlagn_send_advance_bt_config(priv);
1790 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
1791 priv->cur_rssi_ctx = NULL;
1792
1793 iwlagn_send_prio_tbl(priv);
1794
1795 /* FIXME: w/a to force change uCode BT state machine */
1796 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
1797 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1798 if (ret)
1799 return ret;
1800 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
1801 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1802 if (ret)
1803 return ret;
1804 } else {
1805 /*
1806 		 * default is 2-wire BT coexistence support
1807 */
1808 iwl_send_bt_config(priv);
1809 }
1810
1811 if (priv->hw_params.calib_rt_cfg)
1812 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
1813
1814 ieee80211_wake_queues(priv->hw);
1815
1816 priv->active_rate = IWL_RATES_MASK;
1817
1818 /* Configure Tx antenna selection based on H/W config */
1819 iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
1820
1821 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
1822 struct iwl_rxon_cmd *active_rxon =
1823 (struct iwl_rxon_cmd *)&ctx->active;
1824 /* apply any changes in staging */
1825 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1826 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1827 } else {
1828 struct iwl_rxon_context *tmp;
1829 /* Initialize our rx_config data */
1830 for_each_context(priv, tmp)
1831 iwl_connection_init_rx_config(priv, tmp);
1832
1833 iwlagn_set_rxon_chain(priv, ctx);
1834 }
1835
1836 if (!priv->wowlan) {
1837 /* WoWLAN ucode will not reply in the same way, skip it */
1838 iwl_reset_run_time_calib(priv);
1839 }
1840
1841 set_bit(STATUS_READY, &priv->status);
1842
1843 /* Configure the adapter for unassociated operation */
1844 ret = iwlagn_commit_rxon(priv, ctx);
1845 if (ret)
1846 return ret;
1847
1848 /* At this point, the NIC is initialized and operational */
1849 iwl_rf_kill_ct_config(priv);
1850
1851 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1852
1853 return iwl_power_update_mode(priv, true);
1854 }
1855
1856 static void iwl_cancel_deferred_work(struct iwl_priv *priv);
1857
1858 static void __iwl_down(struct iwl_priv *priv)
1859 {
1860 int exit_pending;
1861
1862 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
1863
1864 iwl_scan_cancel_timeout(priv, 200);
1865
1866 /*
1867 	 * If a remain-on-channel session is active, the scan cancel won't
1868 	 * end it towards mac80211, so report it as expired. No race since
1869 	 * we hold the mutex here and a new one can't come in at this time.
1870 */
1871 ieee80211_remain_on_channel_expired(priv->hw);
1872
1873 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
1874
1875 	/* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be set
1876 	 * to prevent the timer from being re-armed */
1877 del_timer_sync(&priv->watchdog);
1878
1879 iwl_clear_ucode_stations(priv, NULL);
1880 iwl_dealloc_bcast_stations(priv);
1881 iwl_clear_driver_stations(priv);
1882
1883 /* reset BT coex data */
1884 priv->bt_status = 0;
1885 priv->cur_rssi_ctx = NULL;
1886 priv->bt_is_sco = 0;
1887 if (priv->cfg->bt_params)
1888 priv->bt_traffic_load =
1889 priv->cfg->bt_params->bt_init_traffic_load;
1890 else
1891 priv->bt_traffic_load = 0;
1892 priv->bt_full_concurrent = false;
1893 priv->bt_ci_compliance = 0;
1894
1895 /* Wipe out the EXIT_PENDING status bit if we are not actually
1896 * exiting the module */
1897 if (!exit_pending)
1898 clear_bit(STATUS_EXIT_PENDING, &priv->status);
1899
1900 if (priv->mac80211_registered)
1901 ieee80211_stop_queues(priv->hw);
1902
1903 /* Clear out all status bits but a few that are stable across reset */
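	/* Each test_bit() result is shifted back to its own bit position, so
	 * only those bits can survive the mask and all others are cleared. */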
1904 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
1905 STATUS_RF_KILL_HW |
1906 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
1907 STATUS_GEO_CONFIGURED |
1908 test_bit(STATUS_FW_ERROR, &priv->status) <<
1909 STATUS_FW_ERROR |
1910 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
1911 STATUS_EXIT_PENDING;
1912
1913 trans_stop_device(&priv->trans);
1914
1915 dev_kfree_skb(priv->beacon_skb);
1916 priv->beacon_skb = NULL;
1917 }
1918
1919 static void iwl_down(struct iwl_priv *priv)
1920 {
1921 mutex_lock(&priv->mutex);
1922 __iwl_down(priv);
1923 mutex_unlock(&priv->mutex);
1924
1925 iwl_cancel_deferred_work(priv);
1926 }
1927
1928 #define MAX_HW_RESTARTS 5
1929
1930 static int __iwl_up(struct iwl_priv *priv)
1931 {
1932 struct iwl_rxon_context *ctx;
1933 int ret;
1934
1935 lockdep_assert_held(&priv->mutex);
1936
1937 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1938 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
1939 return -EIO;
1940 }
1941
1942 for_each_context(priv, ctx) {
1943 ret = iwlagn_alloc_bcast_station(priv, ctx);
1944 if (ret) {
1945 iwl_dealloc_bcast_stations(priv);
1946 return ret;
1947 }
1948 }
1949
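	/* Run the INIT uCode first (initial calibrations), then boot the
	 * runtime image and wait for its ALIVE notification. */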
1950 ret = iwlagn_run_init_ucode(priv);
1951 if (ret) {
1952 IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
1953 goto error;
1954 }
1955
1956 ret = iwlagn_load_ucode_wait_alive(priv,
1957 &priv->ucode_rt,
1958 IWL_UCODE_REGULAR);
1959 if (ret) {
1960 IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
1961 goto error;
1962 }
1963
1964 ret = iwl_alive_start(priv);
1965 if (ret)
1966 goto error;
1967 return 0;
1968
1969 error:
1970 set_bit(STATUS_EXIT_PENDING, &priv->status);
1971 __iwl_down(priv);
1972 clear_bit(STATUS_EXIT_PENDING, &priv->status);
1973
1974 IWL_ERR(priv, "Unable to initialize device.\n");
1975 return ret;
1976 }
1977
1978
1979 /*****************************************************************************
1980 *
1981 * Workqueue callbacks
1982 *
1983 *****************************************************************************/
1984
1985 static void iwl_bg_run_time_calib_work(struct work_struct *work)
1986 {
1987 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1988 run_time_calib_work);
1989
1990 mutex_lock(&priv->mutex);
1991
1992 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1993 test_bit(STATUS_SCANNING, &priv->status)) {
1994 mutex_unlock(&priv->mutex);
1995 return;
1996 }
1997
1998 if (priv->start_calib) {
1999 iwl_chain_noise_calibration(priv);
2000 iwl_sensitivity_calibration(priv);
2001 }
2002
2003 mutex_unlock(&priv->mutex);
2004 }
2005
2006 static void iwlagn_prepare_restart(struct iwl_priv *priv)
2007 {
2008 struct iwl_rxon_context *ctx;
2009 bool bt_full_concurrent;
2010 u8 bt_ci_compliance;
2011 u8 bt_load;
2012 u8 bt_status;
2013 bool bt_is_sco;
2014
2015 lockdep_assert_held(&priv->mutex);
2016
2017 for_each_context(priv, ctx)
2018 ctx->vif = NULL;
2019 priv->is_open = 0;
2020
2021 /*
2022 * __iwl_down() will clear the BT status variables,
2023 * which is correct, but when we restart we really
2024 * want to keep them so restore them afterwards.
2025 *
2026 * The restart process will later pick them up and
2027 * re-configure the hw when we reconfigure the BT
2028 * command.
2029 */
2030 bt_full_concurrent = priv->bt_full_concurrent;
2031 bt_ci_compliance = priv->bt_ci_compliance;
2032 bt_load = priv->bt_traffic_load;
2033 bt_status = priv->bt_status;
2034 bt_is_sco = priv->bt_is_sco;
2035
2036 __iwl_down(priv);
2037
2038 priv->bt_full_concurrent = bt_full_concurrent;
2039 priv->bt_ci_compliance = bt_ci_compliance;
2040 priv->bt_traffic_load = bt_load;
2041 priv->bt_status = bt_status;
2042 priv->bt_is_sco = bt_is_sco;
2043 }
2044
2045 static void iwl_bg_restart(struct work_struct *data)
2046 {
2047 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2048
2049 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2050 return;
2051
2052 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2053 mutex_lock(&priv->mutex);
2054 iwlagn_prepare_restart(priv);
2055 mutex_unlock(&priv->mutex);
2056 iwl_cancel_deferred_work(priv);
2057 ieee80211_restart_hw(priv->hw);
2058 } else {
2059 WARN_ON(1);
2060 }
2061 }
2062
2063 /*****************************************************************************
2064 *
2065 * mac80211 entry point functions
2066 *
2067 *****************************************************************************/
2068
2069 static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
2070 {
2071 .max = 1,
2072 .types = BIT(NL80211_IFTYPE_STATION),
2073 },
2074 {
2075 .max = 1,
2076 .types = BIT(NL80211_IFTYPE_AP),
2077 },
2078 };
2079
2080 static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
2081 {
2082 .max = 2,
2083 .types = BIT(NL80211_IFTYPE_STATION),
2084 },
2085 };
2086
2087 static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
2088 {
2089 .max = 1,
2090 .types = BIT(NL80211_IFTYPE_STATION),
2091 },
2092 {
2093 .max = 1,
2094 .types = BIT(NL80211_IFTYPE_P2P_GO) |
2095 BIT(NL80211_IFTYPE_AP),
2096 },
2097 };
2098
2099 static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
2100 {
2101 .max = 2,
2102 .types = BIT(NL80211_IFTYPE_STATION),
2103 },
2104 {
2105 .max = 1,
2106 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
2107 },
2108 };
2109
2110 static const struct ieee80211_iface_combination
2111 iwlagn_iface_combinations_dualmode[] = {
2112 { .num_different_channels = 1,
2113 .max_interfaces = 2,
2114 .beacon_int_infra_match = true,
2115 .limits = iwlagn_sta_ap_limits,
2116 .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
2117 },
2118 { .num_different_channels = 1,
2119 .max_interfaces = 2,
2120 .limits = iwlagn_2sta_limits,
2121 .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
2122 },
2123 };
2124
2125 static const struct ieee80211_iface_combination
2126 iwlagn_iface_combinations_p2p[] = {
2127 { .num_different_channels = 1,
2128 .max_interfaces = 2,
2129 .beacon_int_infra_match = true,
2130 .limits = iwlagn_p2p_sta_go_limits,
2131 .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
2132 },
2133 { .num_different_channels = 1,
2134 .max_interfaces = 2,
2135 .limits = iwlagn_p2p_2sta_limits,
2136 .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
2137 },
2138 };
2139
2140 /*
2141 * Not a mac80211 entry point function, but it fits in with all the
2142 * other mac80211 functions grouped here.
2143 */
2144 static int iwl_mac_setup_register(struct iwl_priv *priv,
2145 struct iwlagn_ucode_capabilities *capa)
2146 {
2147 int ret;
2148 struct ieee80211_hw *hw = priv->hw;
2149 struct iwl_rxon_context *ctx;
2150
2151 hw->rate_control_algorithm = "iwl-agn-rs";
2152
2153 /* Tell mac80211 our characteristics */
2154 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2155 IEEE80211_HW_AMPDU_AGGREGATION |
2156 IEEE80211_HW_NEED_DTIM_PERIOD |
2157 IEEE80211_HW_SPECTRUM_MGMT |
2158 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2159
2160 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2161
2162 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2163 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2164
2165 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
2166 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2167 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2168
2169 if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
2170 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
2171
2172 hw->sta_data_size = sizeof(struct iwl_station_priv);
2173 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2174
2175 for_each_context(priv, ctx) {
2176 hw->wiphy->interface_modes |= ctx->interface_modes;
2177 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2178 }
2179
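	/* The interface combination setup below assumes exactly two RXON
	 * contexts (BSS and PAN) exist. */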
2180 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
2181
2182 if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
2183 hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
2184 hw->wiphy->n_iface_combinations =
2185 ARRAY_SIZE(iwlagn_iface_combinations_p2p);
2186 } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
2187 hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
2188 hw->wiphy->n_iface_combinations =
2189 ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
2190 }
2191
2192 hw->wiphy->max_remain_on_channel_duration = 1000;
2193
2194 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2195 WIPHY_FLAG_DISABLE_BEACON_HINTS |
2196 WIPHY_FLAG_IBSS_RSN;
2197
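
	/* Advertise WoWLAN only when a WoWLAN uCode image is present and the
	 * platform device is able to wake up the system. */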
2198 if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) {
2199 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
2200 WIPHY_WOWLAN_DISCONNECT |
2201 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
2202 WIPHY_WOWLAN_RFKILL_RELEASE;
2203 if (!iwlagn_mod_params.sw_crypto)
2204 hw->wiphy->wowlan.flags |=
2205 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
2206 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
2207
2208 hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
2209 hw->wiphy->wowlan.pattern_min_len =
2210 IWLAGN_WOWLAN_MIN_PATTERN_LEN;
2211 hw->wiphy->wowlan.pattern_max_len =
2212 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
2213 }
2214
2215 if (iwlagn_mod_params.power_save)
2216 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
2217 else
2218 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2219
2220 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2221 /* we create the 802.11 header and a zero-length SSID element */
2222 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
2223
2224 /* Default value; 4 EDCA QOS priorities */
2225 hw->queues = 4;
2226
2227 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2228
2229 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2230 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2231 &priv->bands[IEEE80211_BAND_2GHZ];
2232 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2233 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2234 &priv->bands[IEEE80211_BAND_5GHZ];
2235
2236 iwl_leds_init(priv);
2237
2238 ret = ieee80211_register_hw(priv->hw);
2239 if (ret) {
2240 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2241 return ret;
2242 }
2243 priv->mac80211_registered = 1;
2244
2245 return 0;
2246 }
2247
2248
2249 static int iwlagn_mac_start(struct ieee80211_hw *hw)
2250 {
2251 struct iwl_priv *priv = hw->priv;
2252 int ret;
2253
2254 IWL_DEBUG_MAC80211(priv, "enter\n");
2255
2256 /* we should be verifying the device is ready to be opened */
2257 mutex_lock(&priv->mutex);
2258 ret = __iwl_up(priv);
2259 mutex_unlock(&priv->mutex);
2260 if (ret)
2261 return ret;
2262
2263 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2264
2265 /* Now we should be done, and the READY bit should be set. */
2266 if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
2267 ret = -EIO;
2268
2269 iwlagn_led_enable(priv);
2270
2271 priv->is_open = 1;
2272 IWL_DEBUG_MAC80211(priv, "leave\n");
2273 return 0;
2274 }
2275
2276 static void iwlagn_mac_stop(struct ieee80211_hw *hw)
2277 {
2278 struct iwl_priv *priv = hw->priv;
2279
2280 IWL_DEBUG_MAC80211(priv, "enter\n");
2281
2282 if (!priv->is_open)
2283 return;
2284
2285 priv->is_open = 0;
2286
2287 iwl_down(priv);
2288
2289 flush_workqueue(priv->workqueue);
2290
2291 	/* User space software may expect to get rfkill changes
2292 	 * even if the interface is down */
2293 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2294 iwl_enable_rfkill_int(priv);
2295
2296 IWL_DEBUG_MAC80211(priv, "leave\n");
2297 }
2298
2299 #ifdef CONFIG_PM
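/*
 * Upload the WoWLAN wakeup patterns configured by user space. The command
 * payload is referenced via IWL_HCMD_DFL_NOCOPY rather than copied, so it is
 * freed only after the synchronous command has completed.
 */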
2300 static int iwlagn_send_patterns(struct iwl_priv *priv,
2301 struct cfg80211_wowlan *wowlan)
2302 {
2303 struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
2304 struct iwl_host_cmd cmd = {
2305 .id = REPLY_WOWLAN_PATTERNS,
2306 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
2307 .flags = CMD_SYNC,
2308 };
2309 int i, err;
2310
2311 if (!wowlan->n_patterns)
2312 return 0;
2313
2314 cmd.len[0] = sizeof(*pattern_cmd) +
2315 wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
2316
2317 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
2318 if (!pattern_cmd)
2319 return -ENOMEM;
2320
2321 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
2322
2323 for (i = 0; i < wowlan->n_patterns; i++) {
2324 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
2325
2326 memcpy(&pattern_cmd->patterns[i].mask,
2327 wowlan->patterns[i].mask, mask_len);
2328 memcpy(&pattern_cmd->patterns[i].pattern,
2329 wowlan->patterns[i].pattern,
2330 wowlan->patterns[i].pattern_len);
2331 pattern_cmd->patterns[i].mask_size = mask_len;
2332 pattern_cmd->patterns[i].pattern_size =
2333 wowlan->patterns[i].pattern_len;
2334 }
2335
2336 cmd.data[0] = pattern_cmd;
2337 err = trans_send_cmd(&priv->trans, &cmd);
2338 kfree(pattern_cmd);
2339 return err;
2340 }
2341 #endif
2342
2343 static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
2344 struct ieee80211_vif *vif,
2345 struct cfg80211_gtk_rekey_data *data)
2346 {
2347 struct iwl_priv *priv = hw->priv;
2348
2349 if (iwlagn_mod_params.sw_crypto)
2350 return;
2351
2352 mutex_lock(&priv->mutex);
2353
2354 if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
2355 goto out;
2356
2357 memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
2358 memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
2359 priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
2360 priv->have_rekey_data = true;
2361
2362 out:
2363 mutex_unlock(&priv->mutex);
2364 }
2365
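/* Key material and sequence counters gathered while iterating the installed
 * keys at suspend time, handed to the WoWLAN uCode afterwards. */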
2366 struct wowlan_key_data {
2367 struct iwl_rxon_context *ctx;
2368 struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
2369 struct iwlagn_wowlan_tkip_params_cmd *tkip;
2370 const u8 *bssid;
2371 bool error, use_rsc_tsc, use_tkip;
2372 };
2373
2374 #ifdef CONFIG_PM
2375 static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
2376 {
2377 int i;
2378
2379 for (i = 0; i < IWLAGN_P1K_SIZE; i++)
2380 out[i] = cpu_to_le16(p1k[i]);
2381 }
2382
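/*
 * Called via ieee80211_iter_keys() on suspend: re-program each key into the
 * device and collect the TKIP/CCMP sequence counters the WoWLAN uCode needs
 * to validate and build frames while the host is asleep.
 */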
2383 static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
2384 struct ieee80211_vif *vif,
2385 struct ieee80211_sta *sta,
2386 struct ieee80211_key_conf *key,
2387 void *_data)
2388 {
2389 struct iwl_priv *priv = hw->priv;
2390 struct wowlan_key_data *data = _data;
2391 struct iwl_rxon_context *ctx = data->ctx;
2392 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
2393 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
2394 struct iwlagn_p1k_cache *rx_p1ks;
2395 u8 *rx_mic_key;
2396 struct ieee80211_key_seq seq;
2397 u32 cur_rx_iv32 = 0;
2398 u16 p1k[IWLAGN_P1K_SIZE];
2399 int ret, i;
2400
2401 mutex_lock(&priv->mutex);
2402
2403 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2404 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2405 !sta && !ctx->key_mapping_keys)
2406 ret = iwl_set_default_wep_key(priv, ctx, key);
2407 else
2408 ret = iwl_set_dynamic_key(priv, ctx, key, sta);
2409
2410 if (ret) {
2411 IWL_ERR(priv, "Error setting key during suspend!\n");
2412 data->error = true;
2413 }
2414
2415 switch (key->cipher) {
2416 case WLAN_CIPHER_SUITE_TKIP:
2417 if (sta) {
2418 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
2419 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
2420
2421 rx_p1ks = data->tkip->rx_uni;
2422
2423 ieee80211_get_key_tx_seq(key, &seq);
2424 tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
2425 tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
2426
2427 ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
2428 iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
2429
2430 memcpy(data->tkip->mic_keys.tx,
2431 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
2432 IWLAGN_MIC_KEY_SIZE);
2433
2434 rx_mic_key = data->tkip->mic_keys.rx_unicast;
2435 } else {
2436 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
2437 rx_p1ks = data->tkip->rx_multi;
2438 rx_mic_key = data->tkip->mic_keys.rx_mcast;
2439 }
2440
2441 /*
2442 * For non-QoS this relies on the fact that both the uCode and
2443 		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
2444 * for checking the IV in the frames.
2445 */
2446 for (i = 0; i < IWLAGN_NUM_RSC; i++) {
2447 ieee80211_get_key_rx_seq(key, i, &seq);
2448 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
2449 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
2450 /* wrapping isn't allowed, AP must rekey */
2451 if (seq.tkip.iv32 > cur_rx_iv32)
2452 cur_rx_iv32 = seq.tkip.iv32;
2453 }
2454
2455 ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
2456 iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
2457 ieee80211_get_tkip_rx_p1k(key, data->bssid,
2458 cur_rx_iv32 + 1, p1k);
2459 iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
2460
2461 memcpy(rx_mic_key,
2462 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
2463 IWLAGN_MIC_KEY_SIZE);
2464
2465 data->use_tkip = true;
2466 data->use_rsc_tsc = true;
2467 break;
2468 case WLAN_CIPHER_SUITE_CCMP:
2469 if (sta) {
2470 u8 *pn = seq.ccmp.pn;
2471
2472 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
2473 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
2474
2475 ieee80211_get_key_tx_seq(key, &seq);
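			/* seq.ccmp.pn holds the 48-bit PN with pn[0] as the
			 * most significant byte; pack it into a little-endian
			 * u64 for the uCode. */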
2476 aes_tx_sc->pn = cpu_to_le64(
2477 (u64)pn[5] |
2478 ((u64)pn[4] << 8) |
2479 ((u64)pn[3] << 16) |
2480 ((u64)pn[2] << 24) |
2481 ((u64)pn[1] << 32) |
2482 ((u64)pn[0] << 40));
2483 } else
2484 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
2485
2486 /*
2487 * For non-QoS this relies on the fact that both the uCode and
2488 * mac80211 use TID 0 for checking the IV in the frames.
2489 */
2490 for (i = 0; i < IWLAGN_NUM_RSC; i++) {
2491 u8 *pn = seq.ccmp.pn;
2492
2493 ieee80211_get_key_rx_seq(key, i, &seq);
2494 aes_sc->pn = cpu_to_le64(
2495 (u64)pn[5] |
2496 ((u64)pn[4] << 8) |
2497 ((u64)pn[3] << 16) |
2498 ((u64)pn[2] << 24) |
2499 ((u64)pn[1] << 32) |
2500 ((u64)pn[0] << 40));
2501 }
2502 data->use_rsc_tsc = true;
2503 break;
2504 }
2505
2506 mutex_unlock(&priv->mutex);
2507 }
2508
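/*
 * Suspend entry point: stop the runtime uCode, boot the WoWLAN image, replay
 * the RXON configuration, re-program keys and GTK rekey material, configure
 * the wakeup filter and patterns, and finally signal D3_CFG_COMPLETE so the
 * uCode operates on its own while the host sleeps.
 */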
2509 static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2510 struct cfg80211_wowlan *wowlan)
2511 {
2512 struct iwl_priv *priv = hw->priv;
2513 struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
2514 struct iwl_rxon_cmd rxon;
2515 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2516 struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
2517 struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
2518 struct wowlan_key_data key_data = {
2519 .ctx = ctx,
2520 .bssid = ctx->active.bssid_addr,
2521 .use_rsc_tsc = false,
2522 .tkip = &tkip_cmd,
2523 .use_tkip = false,
2524 };
2525 int ret, i;
2526 u16 seq;
2527
2528 if (WARN_ON(!wowlan))
2529 return -EINVAL;
2530
2531 mutex_lock(&priv->mutex);
2532
2533 /* Don't attempt WoWLAN when not associated, tear down instead. */
2534 if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
2535 !iwl_is_associated_ctx(ctx)) {
2536 ret = 1;
2537 goto out;
2538 }
2539
2540 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
2541 if (!key_data.rsc_tsc) {
2542 ret = -ENOMEM;
2543 goto out;
2544 }
2545
2546 memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
2547
2548 /*
2549 	 * We know the last used seqno, and the uCode expects to be told
2550 	 * that one; it will increment it before TX.
2551 */
2552 seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
2553 wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
2554
2555 /*
2556 * For QoS counters, we store the one to use next, so subtract 0x10
2557 * since the uCode will add 0x10 before using the value.
2558 */
2559 for (i = 0; i < 8; i++) {
2560 seq = priv->stations[IWL_AP_ID].tid[i].seq_number;
2561 seq -= 0x10;
2562 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
2563 }
2564
2565 if (wowlan->disconnect)
2566 wakeup_filter_cmd.enabled |=
2567 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
2568 IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
2569 if (wowlan->magic_pkt)
2570 wakeup_filter_cmd.enabled |=
2571 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
2572 if (wowlan->gtk_rekey_failure)
2573 wakeup_filter_cmd.enabled |=
2574 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
2575 if (wowlan->eap_identity_req)
2576 wakeup_filter_cmd.enabled |=
2577 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
2578 if (wowlan->four_way_handshake)
2579 wakeup_filter_cmd.enabled |=
2580 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
2581 if (wowlan->rfkill_release)
2582 wakeup_filter_cmd.enabled |=
2583 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
2584 if (wowlan->n_patterns)
2585 wakeup_filter_cmd.enabled |=
2586 cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
2587
2588 iwl_scan_cancel_timeout(priv, 200);
2589
2590 memcpy(&rxon, &ctx->active, sizeof(rxon));
2591
2592 trans_stop_device(&priv->trans);
2593
2594 priv->wowlan = true;
2595
2596 ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
2597 IWL_UCODE_WOWLAN);
2598 if (ret)
2599 goto error;
2600
2601 /* now configure WoWLAN ucode */
2602 ret = iwl_alive_start(priv);
2603 if (ret)
2604 goto error;
2605
2606 memcpy(&ctx->staging, &rxon, sizeof(rxon));
2607 ret = iwlagn_commit_rxon(priv, ctx);
2608 if (ret)
2609 goto error;
2610
2611 ret = iwl_power_update_mode(priv, true);
2612 if (ret)
2613 goto error;
2614
2615 if (!iwlagn_mod_params.sw_crypto) {
2616 /* mark all keys clear */
2617 priv->ucode_key_table = 0;
2618 ctx->key_mapping_keys = 0;
2619
2620 /*
2621 * This needs to be unlocked due to lock ordering
2622 * constraints. Since we're in the suspend path
2623 * that isn't really a problem though.
2624 */
2625 mutex_unlock(&priv->mutex);
2626 ieee80211_iter_keys(priv->hw, ctx->vif,
2627 iwlagn_wowlan_program_keys,
2628 &key_data);
2629 mutex_lock(&priv->mutex);
2630 if (key_data.error) {
2631 ret = -EIO;
2632 goto error;
2633 }
2634
2635 if (key_data.use_rsc_tsc) {
2636 struct iwl_host_cmd rsc_tsc_cmd = {
2637 .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
2638 .flags = CMD_SYNC,
2639 .data[0] = key_data.rsc_tsc,
2640 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
2641 .len[0] = sizeof(*key_data.rsc_tsc),
2642 };
2643
2644 ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd);
2645 if (ret)
2646 goto error;
2647 }
2648
2649 if (key_data.use_tkip) {
2650 ret = trans_send_cmd_pdu(&priv->trans,
2651 REPLY_WOWLAN_TKIP_PARAMS,
2652 CMD_SYNC, sizeof(tkip_cmd),
2653 &tkip_cmd);
2654 if (ret)
2655 goto error;
2656 }
2657
2658 if (priv->have_rekey_data) {
2659 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
2660 memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
2661 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
2662 memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
2663 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
2664 kek_kck_cmd.replay_ctr = priv->replay_ctr;
2665
2666 ret = trans_send_cmd_pdu(&priv->trans,
2667 REPLY_WOWLAN_KEK_KCK_MATERIAL,
2668 CMD_SYNC, sizeof(kek_kck_cmd),
2669 &kek_kck_cmd);
2670 if (ret)
2671 goto error;
2672 }
2673 }
2674
2675 ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
2676 CMD_SYNC, sizeof(wakeup_filter_cmd),
2677 &wakeup_filter_cmd);
2678 if (ret)
2679 goto error;
2680
2681 ret = iwlagn_send_patterns(priv, wowlan);
2682 if (ret)
2683 goto error;
2684
2685 device_set_wakeup_enable(priv->bus->dev, true);
2686
2687 /* Now let the ucode operate on its own */
2688 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2689 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2690
2691 goto out;
2692
2693 error:
2694 priv->wowlan = false;
2695 iwlagn_prepare_restart(priv);
2696 ieee80211_restart_hw(priv->hw);
2697 out:
2698 mutex_unlock(&priv->mutex);
2699 kfree(key_data.rsc_tsc);
2700 return ret;
2701 }
2702
2703 static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2704 {
2705 struct iwl_priv *priv = hw->priv;
2706 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2707 struct ieee80211_vif *vif;
2708 unsigned long flags;
2709 u32 base, status = 0xffffffff;
2710 int ret = -EIO;
2711
2712 mutex_lock(&priv->mutex);
2713
2714 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2715 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2716
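	/* Read the start of the WoWLAN uCode's error event table to check for
	 * firmware errors that occurred while the host was asleep; with
	 * debugfs enabled, also snapshot its SRAM for later inspection. */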
2717 base = priv->device_pointers.error_event_table;
2718 if (iwlagn_hw_valid_rtc_data_addr(base)) {
2719 spin_lock_irqsave(&priv->reg_lock, flags);
2720 ret = iwl_grab_nic_access_silent(priv);
2721 if (ret == 0) {
2722 iwl_write32(priv, HBUS_TARG_MEM_RADDR, base);
2723 status = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
2724 iwl_release_nic_access(priv);
2725 }
2726 spin_unlock_irqrestore(&priv->reg_lock, flags);
2727
2728 #ifdef CONFIG_IWLWIFI_DEBUGFS
2729 if (ret == 0) {
2730 if (!priv->wowlan_sram)
2731 priv->wowlan_sram =
2732 kzalloc(priv->ucode_wowlan.data.len,
2733 GFP_KERNEL);
2734
2735 if (priv->wowlan_sram)
2736 _iwl_read_targ_mem_words(
2737 priv, 0x800000, priv->wowlan_sram,
2738 priv->ucode_wowlan.data.len / 4);
2739 }
2740 #endif
2741 }
2742
2743 /* we'll clear ctx->vif during iwlagn_prepare_restart() */
2744 vif = ctx->vif;
2745
2746 priv->wowlan = false;
2747
2748 device_set_wakeup_enable(priv->bus->dev, false);
2749
2750 iwlagn_prepare_restart(priv);
2751
2752 memset((void *)&ctx->active, 0, sizeof(ctx->active));
2753 iwl_connection_init_rx_config(priv, ctx);
2754 iwlagn_set_rxon_chain(priv, ctx);
2755
2756 mutex_unlock(&priv->mutex);
2757
2758 ieee80211_resume_disconnect(vif);
2759
2760 return 1;
2761 }
2762 #endif
2763
2764 static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2765 {
2766 struct iwl_priv *priv = hw->priv;
2767
2768 IWL_DEBUG_MACDUMP(priv, "enter\n");
2769
2770 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2771 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2772
2773 if (iwlagn_tx_skb(priv, skb))
2774 dev_kfree_skb_any(skb);
2775
2776 IWL_DEBUG_MACDUMP(priv, "leave\n");
2777 }
2778
2779 static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
2780 struct ieee80211_vif *vif,
2781 struct ieee80211_key_conf *keyconf,
2782 struct ieee80211_sta *sta,
2783 u32 iv32, u16 *phase1key)
2784 {
2785 struct iwl_priv *priv = hw->priv;
2786
2787 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
2788 }
2789
2790 static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2791 struct ieee80211_vif *vif,
2792 struct ieee80211_sta *sta,
2793 struct ieee80211_key_conf *key)
2794 {
2795 struct iwl_priv *priv = hw->priv;
2796 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2797 struct iwl_rxon_context *ctx = vif_priv->ctx;
2798 int ret;
2799 bool is_default_wep_key = false;
2800
2801 IWL_DEBUG_MAC80211(priv, "enter\n");
2802
2803 if (iwlagn_mod_params.sw_crypto) {
2804 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2805 return -EOPNOTSUPP;
2806 }
2807
2808 /*
2809 * We could program these keys into the hardware as well, but we
2810 * don't expect much multicast traffic in IBSS and having keys
2811 * for more stations is probably more useful.
2812 *
2813 	 * Mark the key TX-only and return 0.
2814 */
2815 if (vif->type == NL80211_IFTYPE_ADHOC &&
2816 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
2817 key->hw_key_idx = WEP_INVALID_OFFSET;
2818 return 0;
2819 }
2820
2821 	/* If the key was TX-only, accept deletion */
2822 if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
2823 return 0;
2824
2825 mutex_lock(&priv->mutex);
2826 iwl_scan_cancel_timeout(priv, 100);
2827
2828 BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
2829
2830 /*
2831 	 * If we are getting a WEP group key and we didn't receive any key
2832 	 * mapping key so far, we are in legacy WEP mode (group keys only);
2833 	 * otherwise we are in 802.1X mode.
2834 	 * In legacy WEP mode, we use a different host command to the uCode.
2835 */
2836 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2837 key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
2838 if (cmd == SET_KEY)
2839 is_default_wep_key = !ctx->key_mapping_keys;
2840 else
2841 is_default_wep_key =
2842 key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
2843 }
2844
2845
2846 switch (cmd) {
2847 case SET_KEY:
2848 if (is_default_wep_key) {
2849 ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
2850 break;
2851 }
2852 ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
2853 if (ret) {
2854 /*
2855 * can't add key for RX, but we don't need it
2856 * in the device for TX so still return 0
2857 */
2858 ret = 0;
2859 key->hw_key_idx = WEP_INVALID_OFFSET;
2860 }
2861
2862 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2863 break;
2864 case DISABLE_KEY:
2865 if (is_default_wep_key)
2866 ret = iwl_remove_default_wep_key(priv, ctx, key);
2867 else
2868 ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
2869
2870 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2871 break;
2872 default:
2873 ret = -EINVAL;
2874 }
2875
2876 mutex_unlock(&priv->mutex);
2877 IWL_DEBUG_MAC80211(priv, "leave\n");
2878
2879 return ret;
2880 }
2881
2882 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2883 struct ieee80211_vif *vif,
2884 enum ieee80211_ampdu_mlme_action action,
2885 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2886 u8 buf_size)
2887 {
2888 struct iwl_priv *priv = hw->priv;
2889 int ret = -EINVAL;
2890 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
2891
2892 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2893 sta->addr, tid);
2894
2895 if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
2896 return -EACCES;
2897
2898 mutex_lock(&priv->mutex);
2899
2900 switch (action) {
2901 case IEEE80211_AMPDU_RX_START:
2902 IWL_DEBUG_HT(priv, "start Rx\n");
2903 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2904 break;
2905 case IEEE80211_AMPDU_RX_STOP:
2906 IWL_DEBUG_HT(priv, "stop Rx\n");
2907 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
2908 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2909 ret = 0;
2910 break;
2911 case IEEE80211_AMPDU_TX_START:
2912 IWL_DEBUG_HT(priv, "start Tx\n");
2913 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
2914 if (ret == 0) {
2915 priv->agg_tids_count++;
2916 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
2917 priv->agg_tids_count);
2918 }
2919 break;
2920 case IEEE80211_AMPDU_TX_STOP:
2921 IWL_DEBUG_HT(priv, "stop Tx\n");
2922 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
2923 if ((ret == 0) && (priv->agg_tids_count > 0)) {
2924 priv->agg_tids_count--;
2925 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
2926 priv->agg_tids_count);
2927 }
2928 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2929 ret = 0;
2930 if (priv->cfg->ht_params &&
2931 priv->cfg->ht_params->use_rts_for_aggregation) {
2932 /*
2933 * switch off RTS/CTS if it was previously enabled
2934 */
2935 sta_priv->lq_sta.lq.general_params.flags &=
2936 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2937 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2938 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2939 }
2940 break;
2941 case IEEE80211_AMPDU_TX_OPERATIONAL:
2942 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2943
2944 trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid,
2945 buf_size);
2946
2947 /*
2948 * If the limit is 0, then it wasn't initialised yet,
2949 * use the default. We can do that since we take the
2950 * minimum below, and we don't want to go above our
2951 * default due to hardware restrictions.
2952 */
2953 if (sta_priv->max_agg_bufsize == 0)
2954 sta_priv->max_agg_bufsize =
2955 LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2956
2957 /*
2958 * Even though in theory the peer could have different
2959 * aggregation reorder buffer sizes for different sessions,
2960 * our ucode doesn't allow for that and has a global limit
2961 * for each station. Therefore, use the minimum of all the
2962 * aggregation sessions and our default value.
2963 */
2964 sta_priv->max_agg_bufsize =
2965 min(sta_priv->max_agg_bufsize, buf_size);
2966
2967 if (priv->cfg->ht_params &&
2968 priv->cfg->ht_params->use_rts_for_aggregation) {
2969 /*
2970 			 * switch to RTS/CTS if it is the preferred protection
2971 * method for HT traffic
2972 */
2973
2974 sta_priv->lq_sta.lq.general_params.flags |=
2975 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2976 }
2977
2978 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
2979 sta_priv->max_agg_bufsize;
2980
2981 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2982 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2983
2984 IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
2985 sta->addr, tid);
2986 ret = 0;
2987 break;
2988 }
2989 mutex_unlock(&priv->mutex);
2990
2991 return ret;
2992 }
2993
2994 static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
2995 struct ieee80211_vif *vif,
2996 struct ieee80211_sta *sta)
2997 {
2998 struct iwl_priv *priv = hw->priv;
2999 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3000 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3001 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3002 int ret;
3003 u8 sta_id;
3004
3005 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3006 sta->addr);
3007 mutex_lock(&priv->mutex);
3008 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3009 sta->addr);
3010 sta_priv->common.sta_id = IWL_INVALID_STATION;
3011
3012 atomic_set(&sta_priv->pending_frames, 0);
3013 if (vif->type == NL80211_IFTYPE_AP)
3014 sta_priv->client = true;
3015
3016 ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
3017 is_ap, sta, &sta_id);
3018 if (ret) {
3019 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3020 sta->addr, ret);
3021 /* Should we return success if return code is EEXIST ? */
3022 mutex_unlock(&priv->mutex);
3023 return ret;
3024 }
3025
3026 sta_priv->common.sta_id = sta_id;
3027
3028 /* Initialize rate scaling */
3029 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3030 sta->addr);
3031 iwl_rs_rate_init(priv, sta, sta_id);
3032 mutex_unlock(&priv->mutex);
3033
3034 return 0;
3035 }
3036
3037 static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3038 struct ieee80211_channel_switch *ch_switch)
3039 {
3040 struct iwl_priv *priv = hw->priv;
3041 const struct iwl_channel_info *ch_info;
3042 struct ieee80211_conf *conf = &hw->conf;
3043 struct ieee80211_channel *channel = ch_switch->channel;
3044 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
3045 /*
3046 * MULTI-FIXME
3047 * When we add support for multiple interfaces, we need to
3048 * revisit this. The channel switch command in the device
3049 * only affects the BSS context, but what does that really
3050 * mean? And what if we get a CSA on the second interface?
3051 * This needs a lot of work.
3052 */
3053 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3054 u16 ch;
3055
3056 IWL_DEBUG_MAC80211(priv, "enter\n");
3057
3058 mutex_lock(&priv->mutex);
3059
3060 if (iwl_is_rfkill(priv))
3061 goto out;
3062
3063 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
3064 test_bit(STATUS_SCANNING, &priv->status) ||
3065 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
3066 goto out;
3067
3068 if (!iwl_is_associated_ctx(ctx))
3069 goto out;
3070
3071 if (!priv->cfg->lib->set_channel_switch)
3072 goto out;
3073
3074 ch = channel->hw_value;
3075 if (le16_to_cpu(ctx->active.channel) == ch)
3076 goto out;
3077
3078 ch_info = iwl_get_channel_info(priv, channel->band, ch);
3079 if (!is_channel_valid(ch_info)) {
3080 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
3081 goto out;
3082 }
3083
3084 spin_lock_irq(&priv->lock);
3085
3086 priv->current_ht_config.smps = conf->smps_mode;
3087
3088 /* Configure HT40 channels */
3089 ctx->ht.enabled = conf_is_ht(conf);
3090 if (ctx->ht.enabled) {
3091 if (conf_is_ht40_minus(conf)) {
3092 ctx->ht.extension_chan_offset =
3093 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
3094 ctx->ht.is_40mhz = true;
3095 } else if (conf_is_ht40_plus(conf)) {
3096 ctx->ht.extension_chan_offset =
3097 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
3098 ctx->ht.is_40mhz = true;
3099 } else {
3100 ctx->ht.extension_chan_offset =
3101 IEEE80211_HT_PARAM_CHA_SEC_NONE;
3102 ctx->ht.is_40mhz = false;
3103 }
3104 } else
3105 ctx->ht.is_40mhz = false;
3106
3107 if ((le16_to_cpu(ctx->staging.channel) != ch))
3108 ctx->staging.flags = 0;
3109
3110 iwl_set_rxon_channel(priv, channel, ctx);
3111 iwl_set_rxon_ht(priv, ht_conf);
3112 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
3113
3114 spin_unlock_irq(&priv->lock);
3115
3116 iwl_set_rate(priv);
3117 /*
3118 * at this point, staging_rxon has the
3119 * configuration for channel switch
3120 */
3121 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
3122 priv->switch_channel = cpu_to_le16(ch);
3123 if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
3124 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
3125 priv->switch_channel = 0;
3126 ieee80211_chswitch_done(ctx->vif, false);
3127 }
3128
3129 out:
3130 mutex_unlock(&priv->mutex);
3131 IWL_DEBUG_MAC80211(priv, "leave\n");
3132 }
3133
3134 static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3135 unsigned int changed_flags,
3136 unsigned int *total_flags,
3137 u64 multicast)
3138 {
3139 struct iwl_priv *priv = hw->priv;
3140 __le32 filter_or = 0, filter_nand = 0;
3141 struct iwl_rxon_context *ctx;
3142
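	/* For each mac80211 filter flag, collect the RXON filter bits to set
	 * in filter_or and the bits to clear in filter_nand. */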
3143 #define CHK(test, flag) do { \
3144 if (*total_flags & (test)) \
3145 filter_or |= (flag); \
3146 else \
3147 filter_nand |= (flag); \
3148 } while (0)
3149
3150 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3151 changed_flags, *total_flags);
3152
3153 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3154 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
3155 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3156 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3157
3158 #undef CHK
3159
3160 mutex_lock(&priv->mutex);
3161
3162 for_each_context(priv, ctx) {
3163 ctx->staging.filter_flags &= ~filter_nand;
3164 ctx->staging.filter_flags |= filter_or;
3165
3166 /*
3167 * Not committing directly because hardware can perform a scan,
3168 * but we'll eventually commit the filter flags change anyway.
3169 */
3170 }
3171
3172 mutex_unlock(&priv->mutex);
3173
3174 /*
3175 * Receiving all multicast frames is always enabled by the
3176 * default flags setup in iwl_connection_init_rx_config()
3177 * since we currently do not support programming multicast
3178 * filters into the device.
3179 */
3180 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3181 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3182 }
3183
3184 static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3185 {
3186 struct iwl_priv *priv = hw->priv;
3187
3188 mutex_lock(&priv->mutex);
3189 IWL_DEBUG_MAC80211(priv, "enter\n");
3190
3191 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3192 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
3193 goto done;
3194 }
3195 if (iwl_is_rfkill(priv)) {
3196 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
3197 goto done;
3198 }
3199
3200 /*
3201 * mac80211 will not push any more frames for transmit
3202 * until the flush is completed
3203 */
3204 if (drop) {
3205 IWL_DEBUG_MAC80211(priv, "send flush command\n");
3206 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
3207 			IWL_ERR(priv, "flush request failed\n");
3208 goto done;
3209 }
3210 }
3211 	IWL_DEBUG_MAC80211(priv, "wait for all frames to be transmitted/flushed\n");
3212 iwlagn_wait_tx_queue_empty(priv);
3213 done:
3214 mutex_unlock(&priv->mutex);
3215 IWL_DEBUG_MAC80211(priv, "leave\n");
3216 }
3217
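/*
 * Tear down the remain-on-channel setup on the PAN context: revert it to an
 * unassociated P2P device RXON and mark the context inactive again.
 */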
3218 void iwlagn_disable_roc(struct iwl_priv *priv)
3219 {
3220 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3221
3222 lockdep_assert_held(&priv->mutex);
3223
3224 if (!priv->hw_roc_setup)
3225 return;
3226
3227 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
3228 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3229
3230 priv->hw_roc_channel = NULL;
3231
3232 memset(ctx->staging.node_addr, 0, ETH_ALEN);
3233
3234 iwlagn_commit_rxon(priv, ctx);
3235
3236 ctx->is_active = false;
3237 priv->hw_roc_setup = false;
3238 }
3239
3240 static void iwlagn_disable_roc_work(struct work_struct *work)
3241 {
3242 struct iwl_priv *priv = container_of(work, struct iwl_priv,
3243 hw_roc_disable_work.work);
3244
3245 mutex_lock(&priv->mutex);
3246 iwlagn_disable_roc(priv);
3247 mutex_unlock(&priv->mutex);
3248 }
3249
3250 static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3251 struct ieee80211_channel *channel,
3252 enum nl80211_channel_type channel_type,
3253 int duration)
3254 {
3255 struct iwl_priv *priv = hw->priv;
3256 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3257 int err = 0;
3258
3259 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3260 return -EOPNOTSUPP;
3261
3262 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
3263 return -EOPNOTSUPP;
3264
3265 mutex_lock(&priv->mutex);
3266
3267 /*
3268 * TODO: Remove this hack! Firmware needs to be updated
3269 * to allow longer off-channel periods in scanning for
3270 * this use case, based on a flag (and we'll need an API
3271 * flag in the firmware when it has that).
3272 */
3273 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && duration > 80)
3274 duration = 80;
3275
3276 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
3277 err = -EBUSY;
3278 goto out;
3279 }
3280
3281 priv->hw_roc_channel = channel;
3282 priv->hw_roc_chantype = channel_type;
3283 priv->hw_roc_duration = duration;
3284 cancel_delayed_work(&priv->hw_roc_disable_work);
3285
3286 if (!ctx->is_active) {
3287 ctx->is_active = true;
3288 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
3289 memcpy(ctx->staging.node_addr,
3290 priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
3291 ETH_ALEN);
3292 memcpy(ctx->staging.bssid_addr,
3293 priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
3294 ETH_ALEN);
3295 err = iwlagn_commit_rxon(priv, ctx);
3296 if (err)
3297 goto out;
3298 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
3299 RXON_FILTER_PROMISC_MSK |
3300 RXON_FILTER_CTL2HOST_MSK;
3301
3302 err = iwlagn_commit_rxon(priv, ctx);
3303 if (err) {
3304 iwlagn_disable_roc(priv);
3305 goto out;
3306 }
3307 priv->hw_roc_setup = true;
3308 }
3309
3310 err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
3311 if (err)
3312 iwlagn_disable_roc(priv);
3313
3314 out:
3315 mutex_unlock(&priv->mutex);
3316
3317 return err;
3318 }
3319
3320 static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3321 {
3322 struct iwl_priv *priv = hw->priv;
3323
3324 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3325 return -EOPNOTSUPP;
3326
3327 mutex_lock(&priv->mutex);
3328 iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
3329 iwlagn_disable_roc(priv);
3330 mutex_unlock(&priv->mutex);
3331
3332 return 0;
3333 }
3334
3335 /*****************************************************************************
3336 *
3337 * driver setup and teardown
3338 *
3339 *****************************************************************************/
3340
3341 static void iwl_setup_deferred_work(struct iwl_priv *priv)
3342 {
3343 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3344
3345 init_waitqueue_head(&priv->wait_command_queue);
3346
3347 INIT_WORK(&priv->restart, iwl_bg_restart);
3348 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3349 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3350 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
3351 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
3352 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3353 INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
3354 iwlagn_disable_roc_work);
3355
3356 iwl_setup_scan_deferred_work(priv);
3357
3358 if (priv->cfg->lib->bt_setup_deferred_work)
3359 priv->cfg->lib->bt_setup_deferred_work(priv);
3360
3361 init_timer(&priv->statistics_periodic);
3362 priv->statistics_periodic.data = (unsigned long)priv;
3363 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3364
3365 init_timer(&priv->ucode_trace);
3366 priv->ucode_trace.data = (unsigned long)priv;
3367 priv->ucode_trace.function = iwl_bg_ucode_trace;
3368
3369 init_timer(&priv->watchdog);
3370 priv->watchdog.data = (unsigned long)priv;
3371 priv->watchdog.function = iwl_bg_watchdog;
3372 }
3373
3374 static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3375 {
3376 if (priv->cfg->lib->cancel_deferred_work)
3377 priv->cfg->lib->cancel_deferred_work(priv);
3378
3379 cancel_work_sync(&priv->run_time_calib_work);
3380 cancel_work_sync(&priv->beacon_update);
3381
3382 iwl_cancel_scan_deferred_work(priv);
3383
3384 cancel_work_sync(&priv->bt_full_concurrency);
3385 cancel_work_sync(&priv->bt_runtime_config);
3386 cancel_delayed_work_sync(&priv->hw_roc_disable_work);
3387
3388 del_timer_sync(&priv->statistics_periodic);
3389 del_timer_sync(&priv->ucode_trace);
3390 }
3391
3392 static void iwl_init_hw_rates(struct iwl_priv *priv,
3393 struct ieee80211_rate *rates)
3394 {
3395 int i;
3396
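	/* iwl_rates[].ieee is in 500 kbps units while mac80211 bitrates are
	 * in 100 kbps units, hence the factor of 5. */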
3397 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3398 rates[i].bitrate = iwl_rates[i].ieee * 5;
3399 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3400 rates[i].hw_value_short = i;
3401 rates[i].flags = 0;
3402 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3403 /*
3404 * If CCK != 1M then set short preamble rate flag.
3405 */
3406 rates[i].flags |=
3407 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3408 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3409 }
3410 }
3411 }
3412
3413 static int iwl_init_drv(struct iwl_priv *priv)
3414 {
3415 int ret;
3416
3417 spin_lock_init(&priv->sta_lock);
3418 spin_lock_init(&priv->hcmd_lock);
3419
3420 mutex_init(&priv->mutex);
3421
3422 priv->ieee_channels = NULL;
3423 priv->ieee_rates = NULL;
3424 priv->band = IEEE80211_BAND_2GHZ;
3425
3426 priv->iw_mode = NL80211_IFTYPE_STATION;
3427 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3428 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3429 priv->agg_tids_count = 0;
3430
3431 /* initialize force reset */
3432 priv->force_reset[IWL_RF_RESET].reset_duration =
3433 IWL_DELAY_NEXT_FORCE_RF_RESET;
3434 priv->force_reset[IWL_FW_RESET].reset_duration =
3435 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3436
3437 priv->rx_statistics_jiffies = jiffies;
3438
3439 /* Choose which receivers/antennas to use */
3440 iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
3441
3442 iwl_init_scan_params(priv);
3443
3444 /* init bt coex */
3445 if (priv->cfg->bt_params &&
3446 priv->cfg->bt_params->advanced_bt_coexist) {
3447 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
3448 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
3449 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
3450 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
3451 priv->bt_duration = BT_DURATION_LIMIT_DEF;
3452 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
3453 }
3454
3455 ret = iwl_init_channel_map(priv);
3456 if (ret) {
3457 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3458 goto err;
3459 }
3460
3461 ret = iwl_init_geos(priv);
3462 if (ret) {
3463 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3464 goto err_free_channel_map;
3465 }
3466 iwl_init_hw_rates(priv, priv->ieee_rates);
3467
3468 return 0;
3469
3470 err_free_channel_map:
3471 iwl_free_channel_map(priv);
3472 err:
3473 return ret;
3474 }
3475
3476 static void iwl_uninit_drv(struct iwl_priv *priv)
3477 {
3478 iwl_calib_free_results(priv);
3479 iwl_free_geos(priv);
3480 iwl_free_channel_map(priv);
3481 kfree(priv->scan_cmd);
3482 kfree(priv->beacon_cmd);
3483 #ifdef CONFIG_IWLWIFI_DEBUGFS
3484 kfree(priv->wowlan_sram);
3485 #endif
3486 }
3487
3488 static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
3489 enum ieee80211_rssi_event rssi_event)
3490 {
3491 struct iwl_priv *priv = hw->priv;
3492
3493 mutex_lock(&priv->mutex);
3494
3495 if (priv->cfg->bt_params &&
3496 priv->cfg->bt_params->advanced_bt_coexist) {
3497 if (rssi_event == RSSI_EVENT_LOW)
3498 priv->bt_enable_pspoll = true;
3499 else if (rssi_event == RSSI_EVENT_HIGH)
3500 priv->bt_enable_pspoll = false;
3501
3502 iwlagn_send_advance_bt_config(priv);
3503 } else {
3504 		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
3505 "ignoring RSSI callback\n");
3506 }
3507
3508 mutex_unlock(&priv->mutex);
3509 }
3510
3511 struct ieee80211_ops iwlagn_hw_ops = {
3512 .tx = iwlagn_mac_tx,
3513 .start = iwlagn_mac_start,
3514 .stop = iwlagn_mac_stop,
3515 #ifdef CONFIG_PM
3516 .suspend = iwlagn_mac_suspend,
3517 .resume = iwlagn_mac_resume,
3518 #endif
3519 .add_interface = iwl_mac_add_interface,
3520 .remove_interface = iwl_mac_remove_interface,
3521 .change_interface = iwl_mac_change_interface,
3522 .config = iwlagn_mac_config,
3523 .configure_filter = iwlagn_configure_filter,
3524 .set_key = iwlagn_mac_set_key,
3525 .update_tkip_key = iwlagn_mac_update_tkip_key,
3526 .set_rekey_data = iwlagn_mac_set_rekey_data,
3527 .conf_tx = iwl_mac_conf_tx,
3528 .bss_info_changed = iwlagn_bss_info_changed,
3529 .ampdu_action = iwlagn_mac_ampdu_action,
3530 .hw_scan = iwl_mac_hw_scan,
3531 .sta_notify = iwlagn_mac_sta_notify,
3532 .sta_add = iwlagn_mac_sta_add,
3533 .sta_remove = iwl_mac_sta_remove,
3534 .channel_switch = iwlagn_mac_channel_switch,
3535 .flush = iwlagn_mac_flush,
3536 .tx_last_beacon = iwl_mac_tx_last_beacon,
3537 .remain_on_channel = iwl_mac_remain_on_channel,
3538 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
3539 .rssi_callback = iwl_mac_rssi_callback,
3540 CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
3541 CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
3542 };
3543
3544 static u32 iwl_hw_detect(struct iwl_priv *priv)
3545 {
3546 return iwl_read32(priv, CSR_HW_REV);
3547 }
3548
3549 static int iwl_set_hw_params(struct iwl_priv *priv)
3550 {
3551 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3552 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3553 if (iwlagn_mod_params.amsdu_size_8K)
3554 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3555 else
3556 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3557
3558 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3559
3560 if (iwlagn_mod_params.disable_11n)
3561 priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
3562
3563 /* Device-specific setup */
3564 return priv->cfg->lib->set_hw_params(priv);
3565 }
3566
3567 /* This function both allocates and initializes hw and priv. */
3568 static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
3569 {
3570 struct iwl_priv *priv;
3571 /* mac80211 allocates memory for this device instance, including
3572 * space for this driver's private structure */
3573 struct ieee80211_hw *hw;
3574
3575 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
3576 if (hw == NULL) {
3577 pr_err("%s: Can not allocate network device\n",
3578 cfg->name);
3579 goto out;
3580 }
3581
3582 priv = hw->priv;
3583 priv->hw = hw;
3584
3585 out:
3586 return hw;
3587 }
3588
3589 int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3590 {
3591 int err = 0;
3592 struct iwl_priv *priv;
3593 struct ieee80211_hw *hw;
3594 u16 num_mac;
3595 u32 hw_rev;
3596
3597 /************************
3598 * 1. Allocating HW data
3599 ************************/
3600 hw = iwl_alloc_all(cfg);
3601 if (!hw) {
3602 err = -ENOMEM;
3603 goto out;
3604 }
3605
3606 priv = hw->priv;
3607 priv->bus = bus;
3608 priv->shrd = &priv->_shrd;
3609 priv->shrd->bus = bus;
3610 priv->shrd->priv = priv;
3611 bus_set_drv_data(priv->bus, priv->shrd);
3612
3613 /* At this point both hw and priv are allocated. */
3614
3615 SET_IEEE80211_DEV(hw, priv->bus->dev);
3616
3617 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3618 priv->cfg = cfg;
3619 priv->inta_mask = CSR_INI_SET_MASK;
3620
3621 /* is antenna coupling more than 35dB ? */
3622 priv->bt_ant_couple_ok =
3623 (iwlagn_mod_params.ant_coupling >
3624 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
3625 true : false;
3626
3627 /* enable/disable bt channel inhibition */
3628 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
3629 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
3630 (priv->bt_ch_announce) ? "On" : "Off");
3631
3632 if (iwl_alloc_traffic_mem(priv))
3633 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3634
3635 	/* these spin locks will be used in apm_ops.init and EEPROM access,
3636 	 * so we should init them now
3637 */
3638 spin_lock_init(&priv->reg_lock);
3639 spin_lock_init(&priv->lock);
3640
3641 /*
3642 * stop and reset the on-board processor just in case it is in a
3643 * strange state ... like being left stranded by a primary kernel
3644 * and this is now the kdump kernel trying to start up
3645 */
3646 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3647
3648 /***********************
3649 * 3. Read REV register
3650 ***********************/
3651 hw_rev = iwl_hw_detect(priv);
3652 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3653 priv->cfg->name, hw_rev);
3654
3655 err = iwl_trans_register(&priv->trans, priv);
3656 if (err)
3657 goto out_free_traffic_mem;
3658
3659 if (trans_prepare_card_hw(&priv->trans)) {
3660 err = -EIO;
3661 IWL_WARN(priv, "Failed, HW not ready\n");
3662 goto out_free_trans;
3663 }
3664
3665 /*****************
3666 * 4. Read EEPROM
3667 *****************/
3668 /* Read the EEPROM */
3669 err = iwl_eeprom_init(priv, hw_rev);
3670 if (err) {
3671 IWL_ERR(priv, "Unable to init EEPROM\n");
3672 goto out_free_trans;
3673 }
3674 err = iwl_eeprom_check_version(priv);
3675 if (err)
3676 goto out_free_eeprom;
3677
3678 err = iwl_eeprom_check_sku(priv);
3679 if (err)
3680 goto out_free_eeprom;
3681
3682 /* extract MAC Address */
3683 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
3684 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3685 priv->hw->wiphy->addresses = priv->addresses;
3686 priv->hw->wiphy->n_addresses = 1;
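
	/* If the EEPROM advertises more than one MAC address, expose a second
	 * one derived by incrementing the last octet, so mac80211 can assign
	 * it to a second virtual interface. */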
3687 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
3688 if (num_mac > 1) {
3689 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
3690 ETH_ALEN);
3691 priv->addresses[1].addr[5]++;
3692 priv->hw->wiphy->n_addresses++;
3693 }
3694
3695 /************************
3696 * 5. Setup HW constants
3697 ************************/
3698 if (iwl_set_hw_params(priv)) {
3699 err = -ENOENT;
3700 IWL_ERR(priv, "failed to set hw parameters\n");
3701 goto out_free_eeprom;
3702 }
3703
3704 /*******************
3705 * 6. Setup priv
3706 *******************/
3707
3708 err = iwl_init_drv(priv);
3709 if (err)
3710 goto out_free_eeprom;
3711 /* At this point both hw and priv are initialized. */
3712
3713 /********************
3714 * 7. Setup services
3715 ********************/
3716 iwl_setup_deferred_work(priv);
3717 iwl_setup_rx_handlers(priv);
3718 iwl_testmode_init(priv);
3719
3720 /*********************************************
3721 * 8. Enable interrupts
3722 *********************************************/
3723
3724 iwl_enable_rfkill_int(priv);
3725
3726 /* If platform's RF_KILL switch is NOT set to KILL */
3727 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3728 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3729 else
3730 set_bit(STATUS_RF_KILL_HW, &priv->status);
3731
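/* propagate the current hardware rfkill state to mac80211 */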
3732 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3733 test_bit(STATUS_RF_KILL_HW, &priv->status));
3734
3735 iwl_power_initialize(priv);
3736 iwl_tt_initialize(priv);
3737
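/* completed once firmware loading finishes; iwl_remove() waits on this
 * completion before tearing the device down */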
3738 init_completion(&priv->firmware_loading_complete);
3739
3740 err = iwl_request_firmware(priv, true);
3741 if (err)
3742 goto out_destroy_workqueue;
3743
3744 return 0;
3745
3746 out_destroy_workqueue:
3747 destroy_workqueue(priv->workqueue);
3748 priv->workqueue = NULL;
3749 iwl_uninit_drv(priv);
3750 out_free_eeprom:
3751 iwl_eeprom_free(priv);
3752 out_free_trans:
3753 trans_free(&priv->trans);
3754 out_free_traffic_mem:
3755 iwl_free_traffic_mem(priv);
3756 ieee80211_free_hw(priv->hw);
3757 out:
3758 return err;
3759 }
3760
3761 void __devexit iwl_remove(struct iwl_priv *priv)
3762 {
3763 unsigned long flags;
3764
3765 wait_for_completion(&priv->firmware_loading_complete);
3766
3767 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3768
3769 iwl_dbgfs_unregister(priv);
3770 sysfs_remove_group(&priv->bus->dev->kobj,
3771 &iwl_attribute_group);
3772
3773 /* The ieee80211_unregister_hw call will cause iwl_mac_stop and
3774 * iwl_down to be called; since we are removing the device
3775 * we need to set the STATUS_EXIT_PENDING bit.
3776 */
3777 set_bit(STATUS_EXIT_PENDING, &priv->status);
3778
3779 iwl_testmode_cleanup(priv);
3780 iwl_leds_exit(priv);
3781
3782 if (priv->mac80211_registered) {
3783 ieee80211_unregister_hw(priv->hw);
3784 priv->mac80211_registered = 0;
3785 }
3786
3787 /* Reset to low power before unloading driver. */
3788 iwl_apm_stop(priv);
3789
3790 iwl_tt_exit(priv);
3791
3792 /* make sure we flush any pending irq or
3793 * tasklet for the driver
3794 */
3795 spin_lock_irqsave(&priv->lock, flags);
3796 iwl_disable_interrupts(priv);
3797 spin_unlock_irqrestore(&priv->lock, flags);
3798
3799 trans_sync_irq(&priv->trans);
3800
3801 iwl_dealloc_ucode(priv);
3802
3803 trans_rx_free(&priv->trans);
3804 trans_tx_free(&priv->trans);
3805
3806 iwl_eeprom_free(priv);
3807
3808 /*netif_stop_queue(dev); */
3809 flush_workqueue(priv->workqueue);
3810
3811 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3812 * priv->workqueue... so we can't take down the workqueue
3813 * until now... */
3814 destroy_workqueue(priv->workqueue);
3815 priv->workqueue = NULL;
3816 iwl_free_traffic_mem(priv);
3817
3818 trans_free(&priv->trans);
3819
3820 bus_set_drv_data(priv->bus, NULL);
3821
3822 iwl_uninit_drv(priv);
3823
3824 dev_kfree_skb(priv->beacon_skb);
3825
3826 ieee80211_free_hw(priv->hw);
3827 }
3828
3829
3830 /*****************************************************************************
3831 *
3832 * driver and module entry point
3833 *
3834 *****************************************************************************/
3835 static int __init iwl_init(void)
3836 {
3837
3838 int ret;
3839 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3840 pr_info(DRV_COPYRIGHT "\n");
3841
3842 ret = iwlagn_rate_control_register();
3843 if (ret) {
3844 pr_err("Unable to register rate control algorithm: %d\n", ret);
3845 return ret;
3846 }
3847
3848 ret = iwl_pci_register_driver();
3849
3850 if (ret)
3851 goto error_register;
3852 return ret;
3853
3854 error_register:
3855 iwlagn_rate_control_unregister();
3856 return ret;
3857 }
3858
3859 static void __exit iwl_exit(void)
3860 {
3861 iwl_pci_unregister_driver();
3862 iwlagn_rate_control_unregister();
3863 }
3864
3865 module_exit(iwl_exit);
3866 module_init(iwl_init);
3867
3868 #ifdef CONFIG_IWLWIFI_DEBUG
3869 module_param_named(debug, iwlagn_mod_params.debug_level, uint,
3870 S_IRUGO | S_IWUSR);
3871 MODULE_PARM_DESC(debug, "debug output mask");
3872 #endif
3873
3874 module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
3875 MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])");
3876 module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
3877 MODULE_PARM_DESC(queues_num, "number of hw queues.");
3878 module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
3879 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3880 module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
3881 int, S_IRUGO);
3882 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3883 module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3884 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3885
3886 module_param_named(ucode_alternative,
3887 iwlagn_mod_params.wanted_ucode_alternative,
3888 int, S_IRUGO);
3889 MODULE_PARM_DESC(ucode_alternative,
3890 "specify ucode alternative to use from ucode file");
3891
3892 module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
3893 int, S_IRUGO);
3894 MODULE_PARM_DESC(antenna_coupling,
3895 "specify antenna coupling in dB (defualt: 0 dB)");
3896
3897 module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
3898 bool, S_IRUGO);
3899 MODULE_PARM_DESC(bt_ch_inhibition,
3900 "Disable BT channel inhibition (default: enable)");
3901
3902 module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
3903 MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3904
3905 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3906 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3907
3908 module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
3909 MODULE_PARM_DESC(wd_disable,
3910 "Disable stuck queue watchdog timer (default: 0 [enabled])");
3911
3912 /*
3913 * When bt_coex_active is true, uCode will do kill/defer
3914 * every time the priority line is asserted (BT is sending signals on the
3915 * priority line in the PCIx).
3916 * When bt_coex_active is false, uCode will ignore the BT activity and
3917 * perform the normal operation.
3918 *
3919 * Users might experience transmit issues on some platforms due to WiFi/BT
3920 * co-existence problems. The possible symptoms are:
3921 * - being able to scan and find all the available APs
3922 * - not being able to associate with any AP
3923 * On those platforms, WiFi communication can be restored by setting the
3924 * "bt_coex_active" module parameter to "false"
3925 *
3926 * default: bt_coex_active = true (BT_COEX_ENABLE)
3927 */
3928 module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
3929 bool, S_IRUGO);
3930 MODULE_PARM_DESC(bt_coex_active, "enable WiFi/BT coexistence (default: enable)");
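
/*
 * Usage sketch (illustrative, not part of the driver): assuming the driver
 * is built as the "iwlagn" module, BT coexistence can be disabled at load
 * time on platforms that show the transmit issues described above, e.g.:
 *
 *   modprobe iwlagn bt_coex_active=0
 *
 * or persistently via a modprobe configuration file:
 *
 *   # /etc/modprobe.d/iwlagn.conf
 *   options iwlagn bt_coex_active=0
 */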
3931
3932 module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
3933 MODULE_PARM_DESC(led_mode, "0=system default, "
3934 "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
3935
3936 module_param_named(power_save, iwlagn_mod_params.power_save,
3937 bool, S_IRUGO);
3938 MODULE_PARM_DESC(power_save,
3939 "enable WiFi power management (default: disable)");
3940
3941 module_param_named(power_level, iwlagn_mod_params.power_level,
3942 int, S_IRUGO);
3943 MODULE_PARM_DESC(power_level,
3944 "default power save level (range from 1 - 5, default: 1)");
3945
3946 /*
3947 * For now, keep using power level 1 instead of automatically
3948 * adjusting ...
3949 */
3950 module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
3951 bool, S_IRUGO);
3952 MODULE_PARM_DESC(no_sleep_autoadjust,
3953 "don't automatically adjust sleep level "
3954 "according to maximum network latency (default: true)");