iwlagn: new 105 series device
[deliverable/linux.git] / drivers / net / wireless / iwlwifi / iwl-rx.c
CommitLineData
a55360e4
TW
1/******************************************************************************
2 *
901069c7 3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
a55360e4
TW
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
759ef89f 25 * Intel Linux Wireless <ilw@linux.intel.com>
a55360e4
TW
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
1781a07f 30#include <linux/etherdevice.h>
5a0e3ad6 31#include <linux/slab.h>
118253ca 32#include <linux/sched.h>
a55360e4 33#include <net/mac80211.h>
a05ffd39 34#include <asm/unaligned.h>
a55360e4
TW
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
67289941 41#include "iwl-agn-calib.h"
466a19a0
SG
42#include "iwl-agn.h"
43
44/******************************************************************************
45 *
46 * RX path functions
47 *
48 ******************************************************************************/
49
a55360e4
TW
50/*
51 * Rx theory of operation
52 *
53 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
54 * each of which point to Receive Buffers to be filled by the NIC. These get
55 * used not only for Rx frames, but for any command response or notification
56 * from the NIC. The driver and NIC manage the Rx buffers by means
57 * of indexes into the circular buffer.
58 *
59 * Rx Queue Indexes
60 * The host/firmware share two index registers for managing the Rx buffers.
61 *
62 * The READ index maps to the first position that the firmware may be writing
63 * to -- the driver can read up to (but not including) this position and get
64 * good data.
65 * The READ index is managed by the firmware once the card is enabled.
66 *
67 * The WRITE index maps to the last position the driver has read from -- the
68 * position preceding WRITE is the last slot the firmware can place a packet.
69 *
70 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
71 * WRITE = READ.
72 *
73 * During initialization, the host sets up the READ queue position to the first
74 * INDEX position, and WRITE to the last (READ - 1 wrapped)
75 *
76 * When the firmware places a packet in a buffer, it will advance the READ index
77 * and fire the RX interrupt. The driver can then query the READ index and
78 * process as many packets as possible, moving the WRITE index forward as it
79 * resets the Rx queue buffers with new memory.
80 *
81 * The management in the driver is as follows:
82 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
83 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
84 * to replenish the iwl->rxq->rx_free.
85 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
86 * iwl->rxq is replenished and the READ INDEX is updated (updating the
87 * 'processed' and 'read' driver indexes as well)
88 * + A received packet is processed and handed to the kernel network stack,
89 * detached from the iwl->rxq. The driver 'processed' index is updated.
90 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
91 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
92 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
93 * were enough free buffers and RX_STALLED is set it is cleared.
94 *
95 *
96 * Driver sequence:
97 *
98 * iwl_rx_queue_alloc() Allocates rx_free
99 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
100 * iwl_rx_queue_restock
101 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
102 * queue, updates firmware pointers, and updates
103 * the WRITE index. If insufficient rx_free buffers
104 * are available, schedules iwl_rx_replenish
105 *
106 * -- enable interrupts --
107 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
108 * READ INDEX, detaching the SKB from the pool.
109 * Moves the packet buffer from queue to rx_used.
110 * Calls iwl_rx_queue_restock to refill any empty
111 * slots.
112 * ...
113 *
114 */
115
116/**
117 * iwl_rx_queue_space - Return number of free slots available in queue.
118 */
119int iwl_rx_queue_space(const struct iwl_rx_queue *q)
120{
121 int s = q->read - q->write;
122 if (s <= 0)
123 s += RX_QUEUE_SIZE;
124 /* keep some buffer to not confuse full and empty queue */
125 s -= 2;
126 if (s < 0)
127 s = 0;
128 return s;
129}
a55360e4
TW
130
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has progressed, so it may reuse the
 * Rx buffers.  Only touches hardware when q->need_update is set; indexes
 * written to the device are rounded down to a multiple of 8 (device
 * requirement).  Takes and releases q->lock.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/* Device asleep: request wakeup and bail.
				 * need_update stays set, so the pointer
				 * write is retried on the next call. */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
a55360e4
TW
181
182int iwl_rx_queue_alloc(struct iwl_priv *priv)
183{
184 struct iwl_rx_queue *rxq = &priv->rxq;
f36d04ab 185 struct device *dev = &priv->pci_dev->dev;
a55360e4
TW
186 int i;
187
188 spin_lock_init(&rxq->lock);
189 INIT_LIST_HEAD(&rxq->rx_free);
190 INIT_LIST_HEAD(&rxq->rx_used);
191
192 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
d5b25c90 193 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
f36d04ab 194 GFP_KERNEL);
a55360e4 195 if (!rxq->bd)
8d86422a
WT
196 goto err_bd;
197
f36d04ab
SG
198 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
199 &rxq->rb_stts_dma, GFP_KERNEL);
8d86422a
WT
200 if (!rxq->rb_stts)
201 goto err_rb;
a55360e4
TW
202
203 /* Fill the rx_used queue with _all_ of the Rx buffers */
204 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
205 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
206
207 /* Set us so that we have processed and used all buffers, but have
208 * not restocked the Rx queue with fresh buffers */
209 rxq->read = rxq->write = 0;
4752c93c 210 rxq->write_actual = 0;
a55360e4
TW
211 rxq->free_count = 0;
212 rxq->need_update = 0;
213 return 0;
8d86422a
WT
214
215err_rb:
f36d04ab 216 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
d5b25c90 217 rxq->bd_dma);
8d86422a
WT
218err_bd:
219 return -ENOMEM;
a55360e4 220}
a55360e4 221
466a19a0
SG
222/******************************************************************************
223 *
224 * Generic RX handler implementations
225 *
226 ******************************************************************************/
227
466a19a0
SG
/* Log an error notification (REPLY_ERROR) reported by the uCode. */
static void iwl_rx_reply_error(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
241
/*
 * Channel Switch Announcement notification: if a switch we initiated
 * completed successfully, commit the new channel to the active and
 * staging RXON; either way report completion via iwl_chswitch_done().
 */
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		/* status == 0 means the uCode switched successfully and to
		 * the channel we actually requested */
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
268
269
270static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
81963d68
RC
271 struct iwl_rx_mem_buffer *rxb)
272{
273 struct iwl_rx_packet *pkt = rxb_addr(rxb);
274 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
275
276 if (!report->state) {
277 IWL_DEBUG_11H(priv,
278 "Spectrum Measure Notification: Start\n");
279 return;
280 }
281
282 memcpy(&priv->measure_report, report, sizeof(*report));
283 priv->measurement_status |= MEASUREMENT_READY;
284}
81963d68 285
466a19a0
SG
/* Log a PM sleep notification from the uCode (debug builds only). */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
296
/* Hex-dump an otherwise unhandled PM debug statistics notification. */
static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
					     struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	/* payload length is encoded in the frame-size field of len_n_flags */
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
307
/*
 * Beacon tx-status notification: record the IBSS manager state and,
 * unless the driver is shutting down, schedule the beacon update work.
 */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		status & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
331
ad6e82a5
SG
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)

/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
 * operation state.
 *
 * Returns false only when the caller should force a firmware reload.
 */
static bool iwl_good_ack_health(struct iwl_priv *priv,
				struct statistics_tx *cur)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *old;

	/* skip the check while aggregation sessions are active */
	if (priv->_agn.agg_tids_count)
		return true;

	/* previous snapshot, cached by iwl_rx_statistics() */
	old = &priv->statistics.tx;

	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->delta_stats.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->delta_stats.tx.ack_or_ba_timeout_collision);
#endif

		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}

	return true;
}
391
/**
 * iwl_good_plcp_health - checks for plcp error.
 *
 * When the plcp error is exceeding the thresholds, reset the radio
 * to improve the throughput.
 *
 * Returns false only when the caller should force an RF reset.
 */
static bool iwl_good_plcp_health(struct iwl_priv *priv,
				 struct statistics_rx_phy *cur_ofdm,
				 struct statistics_rx_ht_phy *cur_ofdm_ht,
				 unsigned int msecs)
{
	int delta;
	int threshold = priv->cfg->base_params->plcp_delta_threshold;

	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return true;
	}

	/* combined OFDM + OFDM-HT PLCP error growth since the last
	 * cached snapshot */
	delta = le32_to_cpu(cur_ofdm->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
		le32_to_cpu(cur_ofdm_ht->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);

	/* Can be negative if firmware reset statistics */
	if (delta <= 0)
		return true;

	/* threshold is errors per 100 ms */
	if ((delta * 100 / msecs) > threshold) {
		IWL_DEBUG_RADIO(priv,
				"plcp health threshold %u delta %d msecs %u\n",
				threshold, delta, msecs);
		return false;
	}

	return true;
}
429
466a19a0 430static void iwl_recover_from_statistics(struct iwl_priv *priv,
0da0e5bf
JB
431 struct statistics_rx_phy *cur_ofdm,
432 struct statistics_rx_ht_phy *cur_ofdm_ht,
433 struct statistics_tx *tx,
434 unsigned long stamp)
fa8f130c 435{
b7977ffa 436 const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
410f2bb3 437 unsigned int msecs;
b7977ffa 438
410f2bb3
SG
439 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
440 return;
441
410f2bb3
SG
442 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
443
444 /* Only gather statistics and update time stamp when not associated */
445 if (!iwl_is_any_associated(priv))
0da0e5bf 446 return;
410f2bb3
SG
447
448 /* Do not check/recover when do not have enough statistics data */
449 if (msecs < 99)
fa8f130c 450 return;
ca3d9389 451
0da0e5bf 452 if (mod_params->ack_check && !iwl_good_ack_health(priv, tx)) {
ca3d9389
SG
453 IWL_ERR(priv, "low ack count detected, restart firmware\n");
454 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
455 return;
3e4fb5fa 456 }
ca3d9389 457
0da0e5bf
JB
458 if (mod_params->plcp_check &&
459 !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
ca3d9389 460 iwl_force_reset(priv, IWL_RF_RESET, false);
beac5498 461}
beac5498 462
67289941
SG
/* Calculate noise level, based on measurements during network silence just
 * before arriving beacon. This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwl_rx_calc_noise(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &priv->statistics.rx_non_phy;

	/* mask off the in-band silence reading for each Rx chain */
	bcn_silence_a =
		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	/* a zero reading means that chain contributed no measurement */
	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
			bcn_silence_a, bcn_silence_b, bcn_silence_c,
			last_rx_noise);
}
506
0da0e5bf 507#ifdef CONFIG_IWLWIFI_DEBUGFS
67289941
SG
508/*
509 * based on the assumption of all statistics counter are in DWORD
510 * FIXME: This function is for debugging, do not deal with
511 * the case of counters roll-over.
512 */
0da0e5bf
JB
513static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
514 __le32 *max_delta, __le32 *accum, int size)
67289941 515{
0da0e5bf
JB
516 int i;
517
518 for (i = 0;
519 i < size / sizeof(__le32);
520 i++, prev++, cur++, delta++, max_delta++, accum++) {
521 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
522 *delta = cpu_to_le32(
523 le32_to_cpu(*cur) - le32_to_cpu(*prev));
524 le32_add_cpu(accum, le32_to_cpu(*delta));
525 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
67289941
SG
526 *max_delta = *delta;
527 }
528 }
0da0e5bf 529}
67289941 530
0da0e5bf
JB
/*
 * Feed each statistics sub-block through accum_stats() to maintain the
 * delta / max_delta / accum debugfs mirrors in priv.  bt_activity may be
 * NULL (non-BT statistics notification) and is then skipped.
 */
static void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
	/* relies on each _name parameter matching a field of the same name
	 * in priv->statistics / delta_stats / max_delta_stats / accum_stats */
#define ACCUM(_name)	\
	accum_stats((__le32 *)&priv->statistics._name,		\
		    (__le32 *)_name,				\
		    (__le32 *)&priv->delta_stats._name,		\
		    (__le32 *)&priv->max_delta_stats._name,	\
		    (__le32 *)&priv->accum_stats._name,		\
		    sizeof(*_name));

	ACCUM(common);
	ACCUM(rx_non_phy);
	ACCUM(rx_ofdm);
	ACCUM(rx_ofdm_ht);
	ACCUM(rx_cck);
	ACCUM(tx);
	if (bt_activity)
		ACCUM(bt_activity);
#undef ACCUM
}
0da0e5bf
JB
#else
/* Without debugfs the accumulated statistics are unused: compile to a no-op. */
static inline void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
}
#endif
67289941 572
466a19a0 573static void iwl_rx_statistics(struct iwl_priv *priv,
67289941
SG
574 struct iwl_rx_mem_buffer *rxb)
575{
0da0e5bf 576 unsigned long stamp = jiffies;
466a19a0 577 const int reg_recalib_period = 60;
67289941
SG
578 int change;
579 struct iwl_rx_packet *pkt = rxb_addr(rxb);
0da0e5bf
JB
580 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
581 __le32 *flag;
582 struct statistics_general_common *common;
583 struct statistics_rx_non_phy *rx_non_phy;
584 struct statistics_rx_phy *rx_ofdm;
585 struct statistics_rx_ht_phy *rx_ofdm_ht;
586 struct statistics_rx_phy *rx_cck;
587 struct statistics_tx *tx;
588 struct statistics_bt_activity *bt_activity;
589
590 len -= sizeof(struct iwl_cmd_header); /* skip header */
591
592 IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
593 len);
594
595 if (len == sizeof(struct iwl_bt_notif_statistics)) {
596 struct iwl_bt_notif_statistics *stats;
597 stats = &pkt->u.stats_bt;
598 flag = &stats->flag;
599 common = &stats->general.common;
600 rx_non_phy = &stats->rx.general.common;
601 rx_ofdm = &stats->rx.ofdm;
602 rx_ofdm_ht = &stats->rx.ofdm_ht;
603 rx_cck = &stats->rx.cck;
604 tx = &stats->tx;
605 bt_activity = &stats->general.activity;
67289941 606
0da0e5bf
JB
607#ifdef CONFIG_IWLWIFI_DEBUGFS
608 /* handle this exception directly */
609 priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
610 le32_add_cpu(&priv->statistics.accum_num_bt_kills,
611 le32_to_cpu(stats->rx.general.num_bt_kills));
612#endif
613 } else if (len == sizeof(struct iwl_notif_statistics)) {
614 struct iwl_notif_statistics *stats;
615 stats = &pkt->u.stats;
616 flag = &stats->flag;
617 common = &stats->general.common;
618 rx_non_phy = &stats->rx.general;
619 rx_ofdm = &stats->rx.ofdm;
620 rx_ofdm_ht = &stats->rx.ofdm_ht;
621 rx_cck = &stats->rx.cck;
622 tx = &stats->tx;
623 bt_activity = NULL;
67289941 624 } else {
0da0e5bf
JB
625 WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
626 len, sizeof(struct iwl_bt_notif_statistics),
627 sizeof(struct iwl_notif_statistics));
628 return;
67289941
SG
629 }
630
0da0e5bf
JB
631 change = common->temperature != priv->statistics.common.temperature ||
632 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
633 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
634
635 iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
636 rx_ofdm_ht, rx_cck, tx, bt_activity);
637
638 iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
639
640 priv->statistics.flag = *flag;
641 memcpy(&priv->statistics.common, common, sizeof(*common));
642 memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
643 memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
644 memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
645 memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
646 memcpy(&priv->statistics.tx, tx, sizeof(*tx));
647#ifdef CONFIG_IWLWIFI_DEBUGFS
648 if (bt_activity)
649 memcpy(&priv->statistics.bt_activity, bt_activity,
650 sizeof(*bt_activity));
651#endif
652
653 priv->rx_statistics_jiffies = stamp;
67289941 654
67289941
SG
655 set_bit(STATUS_STATISTICS, &priv->status);
656
657 /* Reschedule the statistics timer to occur in
466a19a0 658 * reg_recalib_period seconds to ensure we get a
67289941
SG
659 * thermal update even if the uCode doesn't give
660 * us one */
661 mod_timer(&priv->statistics_periodic, jiffies +
466a19a0 662 msecs_to_jiffies(reg_recalib_period * 1000));
67289941
SG
663
664 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
665 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
666 iwl_rx_calc_noise(priv);
667 queue_work(priv->workqueue, &priv->run_time_calib_work);
668 }
669 if (priv->cfg->ops->lib->temp_ops.temperature && change)
670 priv->cfg->ops->lib->temp_ops.temperature(priv);
671}
672
466a19a0
SG
/*
 * REPLY_STATISTICS_CMD response: if the uCode reports its counters were
 * cleared, wipe our debugfs accumulators too, then process the payload
 * like a regular statistics notification.
 */
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		memset(&priv->accum_stats, 0,
			sizeof(priv->accum_stats));
		memset(&priv->delta_stats, 0,
			sizeof(priv->delta_stats));
		memset(&priv->max_delta_stats, 0,
			sizeof(priv->max_delta_stats));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl_rx_statistics(priv, rxb);
}
691
466a19a0
SG
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of status bits, used below to detect an rfkill change */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block further host commands while the card is disabled */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON still allowed: lift the command block again */
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* notify mac80211 only if the HW rfkill bit actually changed;
	 * otherwise just wake anyone waiting on a blocked command */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
744
/*
 * Missed-beacon notification: when more consecutive beacons were missed
 * than the configured threshold, re-run the sensitivity calibration
 * (unless a scan is in progress).
 */
static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)

{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv,
		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
765
466a19a0
SG
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	/* mark the cached phy info valid and store the raw payload */
	priv->_agn.last_phy_res_valid = true;
	memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
	       sizeof(struct iwl_rx_phy_res));
}
777
1781a07f
EG
/*
 * Inspect the HW decryption status of a protected frame and update
 * stats->flag accordingly.
 *
 * returns non-zero if packet should be dropped
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
						RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames need no decryption bookkeeping */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

		/* fall through: good TTAK, check ICV/MIC like WEP */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through: ICV ok, check overall decrypt status */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
466a19a0
SG
829
/*
 * Hand a received frame up to mac80211.  The frame data is attached to
 * the skb as a page fragment (no copy); frames that fail HW decryption
 * are dropped, and any tx queues stalled on a passive channel are woken
 * when a matching beacon arrives.
 */
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* small skb; the actual frame data stays in rxb->page */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc))) {
		for_each_context(priv, ctx) {
			if (!ctx->last_tx_rejected)
				continue;
			/* only the context whose BSSID sent this beacon */
			if (compare_ether_addr(hdr->addr3,
					       ctx->active.bssid_addr))
				continue;
			ctx->last_tx_rejected = false;
			iwl_wake_any_queue(priv, ctx);
		}
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* page ownership passed to the skb; don't reuse it in the rx path */
	rxb->page = NULL;
}
887
/*
 * Translate the decrypt status bits reported with REPLY_RX_MPDU_CMD
 * frames into the legacy REPLY_RX status format that
 * iwl_set_decrypted_flag() understands.
 */
static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	/* security type bits are carried over unchanged */
	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* WEP (and TKIP with good TTAK): check the ICV bit */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}
946
947/* Called for REPLY_RX (legacy ABG frames), or
948 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
949static void iwl_rx_reply_rx(struct iwl_priv *priv,
950 struct iwl_rx_mem_buffer *rxb)
951{
952 struct ieee80211_hdr *header;
953 struct ieee80211_rx_status rx_status;
954 struct iwl_rx_packet *pkt = rxb_addr(rxb);
955 struct iwl_rx_phy_res *phy_res;
956 __le32 rx_pkt_status;
957 struct iwl_rx_mpdu_res_start *amsdu;
958 u32 len;
959 u32 ampdu_status;
960 u32 rate_n_flags;
961
962 /**
963 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
964 * REPLY_RX: physical layer info is in this buffer
965 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
966 * command and cached in priv->last_phy_res
967 *
968 * Here we set up local variables depending on which command is
969 * received.
970 */
971 if (pkt->hdr.cmd == REPLY_RX) {
972 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
973 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
974 + phy_res->cfg_phy_cnt);
975
976 len = le16_to_cpu(phy_res->byte_count);
977 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
978 phy_res->cfg_phy_cnt + len);
979 ampdu_status = le32_to_cpu(rx_pkt_status);
980 } else {
981 if (!priv->_agn.last_phy_res_valid) {
982 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
983 return;
984 }
985 phy_res = &priv->_agn.last_phy_res;
986 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
987 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
988 len = le16_to_cpu(amsdu->byte_count);
989 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
990 ampdu_status = iwl_translate_rx_status(priv,
991 le32_to_cpu(rx_pkt_status));
992 }
993
994 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
995 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
996 phy_res->cfg_phy_cnt);
997 return;
998 }
999
1000 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1001 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1002 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1003 le32_to_cpu(rx_pkt_status));
1004 return;
1005 }
1006
1007 /* This will be used in several places later */
1008 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1009
1010 /* rx_status carries information about the packet to mac80211 */
1011 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1012 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1013 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1014 rx_status.freq =
1015 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1016 rx_status.band);
1017 rx_status.rate_idx =
1018 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1019 rx_status.flag = 0;
1020
1021 /* TSF isn't reliable. In order to allow smooth user experience,
1022 * this W/A doesn't propagate it to the mac80211 */
1023 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
1024
1025 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1026
1027 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1028 rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
1029
1030 iwl_dbg_log_rx_data_frame(priv, len, header);
1031 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1032 rx_status.signal, (unsigned long long)rx_status.mactime);
1033
1034 /*
1035 * "antenna number"
1036 *
1037 * It seems that the antenna field in the phy flags value
1038 * is actually a bit field. This is undefined by radiotap,
1039 * it wants an actual antenna number but I always get "7"
1040 * for most legacy frames I receive indicating that the
1041 * same frame was received on all three RX chains.
1042 *
1043 * I think this field should be removed in favor of a
1044 * new 802.11n radiotap field "RX chains" that is defined
1045 * as a bitmask.
1046 */
1047 rx_status.antenna =
1048 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1049 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1050
1051 /* set the preamble flag if appropriate */
1052 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1053 rx_status.flag |= RX_FLAG_SHORTPRE;
1054
1055 /* Set up the HT phy flags */
1056 if (rate_n_flags & RATE_MCS_HT_MSK)
1057 rx_status.flag |= RX_FLAG_HT;
1058 if (rate_n_flags & RATE_MCS_HT40_MSK)
1059 rx_status.flag |= RX_FLAG_40MHZ;
1060 if (rate_n_flags & RATE_MCS_SGI_MSK)
1061 rx_status.flag |= RX_FLAG_SHORT_GI;
1062
1063 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1064 rxb, &rx_status);
1065}
1066
1067/**
1068 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1069 *
1070 * Setup the RX handlers for each of the reply types sent from the uCode
1071 * to the host.
1072 */
1073void iwl_setup_rx_handlers(struct iwl_priv *priv)
1074{
1075 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
1076
1077 handlers = priv->rx_handlers;
1078
466a19a0
SG
1079 handlers[REPLY_ERROR] = iwl_rx_reply_error;
1080 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1081 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
1082 handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
1083 handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
1084 handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
1085
1086 /*
1087 * The same handler is used for both the REPLY to a discrete
1088 * statistics request from the host as well as for the periodic
1089 * statistics notifications (after received beacons) from the uCode.
1090 */
1091 handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
1092 handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
1093
1094 iwl_setup_rx_scan_handlers(priv);
1095
1096 handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
1097 handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
1098
1099 /* Rx handlers */
1100 handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
1101 handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
1102
1103 /* block ack */
1104 handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
1105
1106 /* Set up hardware specific Rx handlers */
1107 priv->cfg->ops->lib->rx_handler_setup(priv);
1108}
This page took 0.805061 seconds and 5 git commands to generate.