iwlagn: clean up & autodetect statistics
[deliverable/linux.git] / drivers / net / wireless / iwlwifi / iwl-rx.c
1 /******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30 #include <linux/etherdevice.h>
31 #include <linux/slab.h>
32 #include <linux/sched.h>
33 #include <net/mac80211.h>
34 #include <asm/unaligned.h>
35 #include "iwl-eeprom.h"
36 #include "iwl-dev.h"
37 #include "iwl-core.h"
38 #include "iwl-sta.h"
39 #include "iwl-io.h"
40 #include "iwl-helpers.h"
41 #include "iwl-agn-calib.h"
42 #include "iwl-agn.h"
43
44 /******************************************************************************
45 *
46 * RX path functions
47 *
48 ******************************************************************************/
49
50 /*
51 * Rx theory of operation
52 *
53 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
54 * each of which point to Receive Buffers to be filled by the NIC. These get
55 * used not only for Rx frames, but for any command response or notification
56 * from the NIC. The driver and NIC manage the Rx buffers by means
57 * of indexes into the circular buffer.
58 *
59 * Rx Queue Indexes
60 * The host/firmware share two index registers for managing the Rx buffers.
61 *
62 * The READ index maps to the first position that the firmware may be writing
63 * to -- the driver can read up to (but not including) this position and get
64 * good data.
65 * The READ index is managed by the firmware once the card is enabled.
66 *
67 * The WRITE index maps to the last position the driver has read from -- the
68 * position preceding WRITE is the last slot the firmware can place a packet.
69 *
70 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
71 * WRITE = READ.
72 *
73 * During initialization, the host sets up the READ queue position to the first
74 * INDEX position, and WRITE to the last (READ - 1 wrapped)
75 *
76 * When the firmware places a packet in a buffer, it will advance the READ index
77 * and fire the RX interrupt. The driver can then query the READ index and
78 * process as many packets as possible, moving the WRITE index forward as it
79 * resets the Rx queue buffers with new memory.
80 *
81 * The management in the driver is as follows:
82 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
83 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
84 * to replenish the iwl->rxq->rx_free.
85 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
86 * iwl->rxq is replenished and the READ INDEX is updated (updating the
87 * 'processed' and 'read' driver indexes as well)
88 * + A received packet is processed and handed to the kernel network stack,
89 * detached from the iwl->rxq. The driver 'processed' index is updated.
90 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
91 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
92 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
93 * were enough free buffers and RX_STALLED is set it is cleared.
94 *
95 *
96 * Driver sequence:
97 *
98 * iwl_rx_queue_alloc() Allocates rx_free
99 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
100 * iwl_rx_queue_restock
101 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
102 * queue, updates firmware pointers, and updates
103 * the WRITE index. If insufficient rx_free buffers
104 * are available, schedules iwl_rx_replenish
105 *
106 * -- enable interrupts --
107 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
108 * READ INDEX, detaching the SKB from the pool.
109 * Moves the packet buffer from queue to rx_used.
110 * Calls iwl_rx_queue_restock to refill any empty
111 * slots.
112 * ...
113 *
114 */
115
116 /**
117 * iwl_rx_queue_space - Return number of free slots available in queue.
118 */
119 int iwl_rx_queue_space(const struct iwl_rx_queue *q)
120 {
121 int s = q->read - q->write;
122 if (s <= 0)
123 s += RX_QUEUE_SIZE;
124 /* keep some buffer to not confuse full and empty queue */
125 s -= 2;
126 if (s < 0)
127 s = 0;
128 return s;
129 }
130
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Pushes q->write (rounded down to a multiple of 8, as the device requires)
 * into the device RX write-pointer register, so the firmware knows how far
 * the ring has been replenished.  No-op unless q->need_update is set.
 * Takes q->lock internally.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled: a direct write is always safe */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/* MAC is asleep: request wakeup and bail out
				 * without clearing need_update, so a later
				 * call retries the register write. */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
181
182 int iwl_rx_queue_alloc(struct iwl_priv *priv)
183 {
184 struct iwl_rx_queue *rxq = &priv->rxq;
185 struct device *dev = &priv->pci_dev->dev;
186 int i;
187
188 spin_lock_init(&rxq->lock);
189 INIT_LIST_HEAD(&rxq->rx_free);
190 INIT_LIST_HEAD(&rxq->rx_used);
191
192 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
193 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
194 GFP_KERNEL);
195 if (!rxq->bd)
196 goto err_bd;
197
198 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
199 &rxq->rb_stts_dma, GFP_KERNEL);
200 if (!rxq->rb_stts)
201 goto err_rb;
202
203 /* Fill the rx_used queue with _all_ of the Rx buffers */
204 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
205 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
206
207 /* Set us so that we have processed and used all buffers, but have
208 * not restocked the Rx queue with fresh buffers */
209 rxq->read = rxq->write = 0;
210 rxq->write_actual = 0;
211 rxq->free_count = 0;
212 rxq->need_update = 0;
213 return 0;
214
215 err_rb:
216 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
217 rxq->bd_dma);
218 err_bd:
219 return -ENOMEM;
220 }
221
222 /******************************************************************************
223 *
224 * Generic RX handler implementations
225 *
226 ******************************************************************************/
227
/*
 * iwl_rx_reply_alive - handle the uCode ALIVE notification
 *
 * Records the device event/error log table pointers, then schedules the
 * matching start work (init image vs. runtime image).  On a bad status,
 * queues a firmware restart instead.
 */
static void iwl_rx_reply_alive(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	/* Remember where the uCode keeps its logs so they can be dumped
	 * on errors later. */
	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);
	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);

	/* Init and runtime images each have their own start work */
	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else {
		IWL_WARN(priv, "%s uCode did not respond OK.\n",
			 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
			 "init" : "runtime");
		/*
		 * If fail to load init uCode,
		 * let's try to load the init uCode again.
		 * We should not get into this situation, but if it
		 * does happen, we should not move on and loading "runtime"
		 * without proper calibrate the device.
		 */
		if (palive->ver_subtype == INITIALIZE_SUBTYPE)
			priv->ucode_type = UCODE_NONE;
		queue_work(priv->workqueue, &priv->restart);
	}
}
276
277 static void iwl_rx_reply_error(struct iwl_priv *priv,
278 struct iwl_rx_mem_buffer *rxb)
279 {
280 struct iwl_rx_packet *pkt = rxb_addr(rxb);
281
282 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
283 "seq 0x%04X ser 0x%08X\n",
284 le32_to_cpu(pkt->u.err_resp.error_type),
285 get_cmd_string(pkt->u.err_resp.cmd_id),
286 pkt->u.err_resp.cmd_id,
287 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
288 le32_to_cpu(pkt->u.err_resp.error_info));
289 }
290
291 static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
292 {
293 struct iwl_rx_packet *pkt = rxb_addr(rxb);
294 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
295 /*
296 * MULTI-FIXME
297 * See iwl_mac_channel_switch.
298 */
299 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
300 struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
301
302 if (priv->switch_rxon.switch_in_progress) {
303 if (!le32_to_cpu(csa->status) &&
304 (csa->channel == priv->switch_rxon.channel)) {
305 rxon->channel = csa->channel;
306 ctx->staging.channel = csa->channel;
307 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
308 le16_to_cpu(csa->channel));
309 iwl_chswitch_done(priv, true);
310 } else {
311 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
312 le16_to_cpu(csa->channel));
313 iwl_chswitch_done(priv, false);
314 }
315 }
316 }
317
318
319 static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
320 struct iwl_rx_mem_buffer *rxb)
321 {
322 struct iwl_rx_packet *pkt = rxb_addr(rxb);
323 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
324
325 if (!report->state) {
326 IWL_DEBUG_11H(priv,
327 "Spectrum Measure Notification: Start\n");
328 return;
329 }
330
331 memcpy(&priv->measure_report, report, sizeof(*report));
332 priv->measurement_status |= MEASUREMENT_READY;
333 }
334
/* Debug-only: log the uCode power-management sleep notification. */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
	/* no-op when debugging is compiled out */
}
345
346 static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
347 struct iwl_rx_mem_buffer *rxb)
348 {
349 struct iwl_rx_packet *pkt = rxb_addr(rxb);
350 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
351 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
352 "notification for %s:\n", len,
353 get_cmd_string(pkt->hdr.cmd));
354 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
355 }
356
/*
 * iwl_rx_beacon_notif - handle a beacon (tx) status notification
 *
 * Tracks whether we are the IBSS manager and, unless the driver is
 * shutting down, schedules the beacon update work.
 */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		status & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	/* don't queue new work while tearing down */
	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
380
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)

/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
 * operation state.
 *
 * Returns false only when the block-ack timeout delta has reached
 * BA_TIMEOUT_MAX while the ACK ratio is below ACK_CNT_RATIO; all
 * other outcomes report "healthy".
 */
static bool iwl_good_ack_health(struct iwl_priv *priv,
				struct statistics_tx *cur)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *old;

	/* skip the check entirely while aggregation sessions are active */
	if (priv->_agn.agg_tids_count)
		return true;

	/* previous statistics snapshot, for delta computation */
	old = &priv->statistics.tx;

	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->delta_stats.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->delta_stats.tx.ack_or_ba_timeout_collision);
#endif

		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}

	return true;
}
440
441 /**
442 * iwl_good_plcp_health - checks for plcp error.
443 *
444 * When the plcp error is exceeding the thresholds, reset the radio
445 * to improve the throughput.
446 */
447 static bool iwl_good_plcp_health(struct iwl_priv *priv,
448 struct statistics_rx_phy *cur_ofdm,
449 struct statistics_rx_ht_phy *cur_ofdm_ht,
450 unsigned int msecs)
451 {
452 int delta;
453 int threshold = priv->cfg->base_params->plcp_delta_threshold;
454
455 if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
456 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
457 return true;
458 }
459
460 delta = le32_to_cpu(cur_ofdm->plcp_err) -
461 le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
462 le32_to_cpu(cur_ofdm_ht->plcp_err) -
463 le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
464
465 /* Can be negative if firmware reset statistics */
466 if (delta <= 0)
467 return true;
468
469 if ((delta * 100 / msecs) > threshold) {
470 IWL_DEBUG_RADIO(priv,
471 "plcp health threshold %u delta %d msecs %u\n",
472 threshold, delta, msecs);
473 return false;
474 }
475
476 return true;
477 }
478
/*
 * iwl_recover_from_statistics - trigger FW/RF resets based on health checks
 *
 * Runs the ACK-ratio and PLCP-error health checks (when enabled via module
 * parameters) and forces a firmware or RF reset if they fail.  Checks are
 * skipped while shutting down, while not associated, and when less than
 * ~100ms of statistics have accumulated.
 */
static void iwl_recover_from_statistics(struct iwl_priv *priv,
				struct statistics_rx_phy *cur_ofdm,
				struct statistics_rx_ht_phy *cur_ofdm_ht,
				struct statistics_tx *tx,
				unsigned long stamp)
{
	const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
	unsigned int msecs;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* Health checks only make sense while associated; otherwise the
	 * caller merely gathers statistics and updates the time stamp. */
	if (!iwl_is_any_associated(priv))
		return;

	/* Do not check/recover when do not have enough statistics data */
	if (msecs < 99)
		return;

	if (mod_params->ack_check && !iwl_good_ack_health(priv, tx)) {
		IWL_ERR(priv, "low ack count detected, restart firmware\n");
		/* if the FW reset succeeded, skip the RF check this round */
		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
			return;
	}

	if (mod_params->plcp_check &&
	    !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
		iwl_force_reset(priv, IWL_RF_RESET, false);
}
511
512 /* Calculate noise level, based on measurements during network silence just
513 * before arriving beacon. This measurement can be done only if we know
514 * exactly when to expect beacons, therefore only when we're associated. */
515 static void iwl_rx_calc_noise(struct iwl_priv *priv)
516 {
517 struct statistics_rx_non_phy *rx_info;
518 int num_active_rx = 0;
519 int total_silence = 0;
520 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
521 int last_rx_noise;
522
523 rx_info = &priv->statistics.rx_non_phy;
524
525 bcn_silence_a =
526 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
527 bcn_silence_b =
528 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
529 bcn_silence_c =
530 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
531
532 if (bcn_silence_a) {
533 total_silence += bcn_silence_a;
534 num_active_rx++;
535 }
536 if (bcn_silence_b) {
537 total_silence += bcn_silence_b;
538 num_active_rx++;
539 }
540 if (bcn_silence_c) {
541 total_silence += bcn_silence_c;
542 num_active_rx++;
543 }
544
545 /* Average among active antennas */
546 if (num_active_rx)
547 last_rx_noise = (total_silence / num_active_rx) - 107;
548 else
549 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
550
551 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
552 bcn_silence_a, bcn_silence_b, bcn_silence_c,
553 last_rx_noise);
554 }
555
556 #ifdef CONFIG_IWLWIFI_DEBUGFS
557 /*
558 * based on the assumption of all statistics counter are in DWORD
559 * FIXME: This function is for debugging, do not deal with
560 * the case of counters roll-over.
561 */
562 static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
563 __le32 *max_delta, __le32 *accum, int size)
564 {
565 int i;
566
567 for (i = 0;
568 i < size / sizeof(__le32);
569 i++, prev++, cur++, delta++, max_delta++, accum++) {
570 if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
571 *delta = cpu_to_le32(
572 le32_to_cpu(*cur) - le32_to_cpu(*prev));
573 le32_add_cpu(accum, le32_to_cpu(*delta));
574 if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
575 *max_delta = *delta;
576 }
577 }
578 }
579
580 static void
581 iwl_accumulative_statistics(struct iwl_priv *priv,
582 struct statistics_general_common *common,
583 struct statistics_rx_non_phy *rx_non_phy,
584 struct statistics_rx_phy *rx_ofdm,
585 struct statistics_rx_ht_phy *rx_ofdm_ht,
586 struct statistics_rx_phy *rx_cck,
587 struct statistics_tx *tx,
588 struct statistics_bt_activity *bt_activity)
589 {
590 #define ACCUM(_name) \
591 accum_stats((__le32 *)&priv->statistics._name, \
592 (__le32 *)_name, \
593 (__le32 *)&priv->delta_stats._name, \
594 (__le32 *)&priv->max_delta_stats._name, \
595 (__le32 *)&priv->accum_stats._name, \
596 sizeof(*_name));
597
598 ACCUM(common);
599 ACCUM(rx_non_phy);
600 ACCUM(rx_ofdm);
601 ACCUM(rx_ofdm_ht);
602 ACCUM(rx_cck);
603 ACCUM(tx);
604 if (bt_activity)
605 ACCUM(bt_activity);
606 #undef ACCUM
607 }
608 #else
/* Stub used when CONFIG_IWLWIFI_DEBUGFS is off: no accumulators exist,
 * so statistics accumulation is a no-op. */
static inline void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
}
620 #endif
621
/*
 * iwl_rx_statistics - handle a statistics notification from the uCode
 *
 * Autodetects by length whether this is a BT-combined or normal statistics
 * packet, accumulates debugfs deltas, runs the recovery health checks,
 * snapshots the new counters into priv->statistics, and kicks off noise
 * calculation / runtime calibration / temperature handling as needed.
 */
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	unsigned long stamp = jiffies;
	const int reg_recalib_period = 60;
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	__le32 *flag;
	struct statistics_general_common *common;
	struct statistics_rx_non_phy *rx_non_phy;
	struct statistics_rx_phy *rx_ofdm;
	struct statistics_rx_ht_phy *rx_ofdm_ht;
	struct statistics_rx_phy *rx_cck;
	struct statistics_tx *tx;
	struct statistics_bt_activity *bt_activity;

	len -= sizeof(struct iwl_cmd_header); /* skip header */

	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
		     len);

	/* Payload length tells us which statistics format the uCode sent */
	if (len == sizeof(struct iwl_bt_notif_statistics)) {
		struct iwl_bt_notif_statistics *stats;
		stats = &pkt->u.stats_bt;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general.common;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = &stats->general.activity;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* handle this exception directly */
		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
			     le32_to_cpu(stats->rx.general.num_bt_kills));
#endif
	} else if (len == sizeof(struct iwl_notif_statistics)) {
		struct iwl_notif_statistics *stats;
		stats = &pkt->u.stats;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = NULL;
	} else {
		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
			  len, sizeof(struct iwl_bt_notif_statistics),
			  sizeof(struct iwl_notif_statistics));
		return;
	}

	/* a temperature or HT40-mode change triggers the temperature op */
	change = common->temperature != priv->statistics.common.temperature ||
		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);

	/* accumulate deltas while the previous snapshot is still in place */
	iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
				    rx_ofdm_ht, rx_cck, tx, bt_activity);

	iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);

	/* now replace the stored snapshot with the new counters */
	priv->statistics.flag = *flag;
	memcpy(&priv->statistics.common, common, sizeof(*common));
	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (bt_activity)
		memcpy(&priv->statistics.bt_activity, bt_activity,
		       sizeof(*bt_activity));
#endif

	priv->rx_statistics_jiffies = stamp;

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
721
722 static void iwl_rx_reply_statistics(struct iwl_priv *priv,
723 struct iwl_rx_mem_buffer *rxb)
724 {
725 struct iwl_rx_packet *pkt = rxb_addr(rxb);
726
727 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
728 #ifdef CONFIG_IWLWIFI_DEBUGFS
729 memset(&priv->accum_stats, 0,
730 sizeof(priv->accum_stats));
731 memset(&priv->delta_stats, 0,
732 sizeof(priv->delta_stats));
733 memset(&priv->max_delta_stats, 0,
734 sizeof(priv->max_delta_stats));
735 #endif
736 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
737 }
738 iwl_rx_statistics(priv, rxb);
739 }
740
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of the old status, to detect HW rfkill transitions */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block host commands while the card is disabled */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON still allowed: unblock again */
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* report HW rfkill changes to mac80211; otherwise just wake any
	 * command waiters that may have been blocked */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
793
794 static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
795 struct iwl_rx_mem_buffer *rxb)
796
797 {
798 struct iwl_rx_packet *pkt = rxb_addr(rxb);
799 struct iwl_missed_beacon_notif *missed_beacon;
800
801 missed_beacon = &pkt->u.missed_beacon;
802 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
803 priv->missed_beacon_threshold) {
804 IWL_DEBUG_CALIB(priv,
805 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
806 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
807 le32_to_cpu(missed_beacon->total_missed_becons),
808 le32_to_cpu(missed_beacon->num_recvd_beacons),
809 le32_to_cpu(missed_beacon->num_expected_beacons));
810 if (!test_bit(STATUS_SCANNING, &priv->status))
811 iwl_init_sensitivity(priv);
812 }
813 }
814
815 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
816 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
817 static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
818 struct iwl_rx_mem_buffer *rxb)
819 {
820 struct iwl_rx_packet *pkt = rxb_addr(rxb);
821
822 priv->_agn.last_phy_res_valid = true;
823 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
824 sizeof(struct iwl_rx_phy_res));
825 }
826
/*
 * iwl_set_decrypted_flag - translate the HW decryption result into
 * mac80211's RX_FLAG_DECRYPTED, per security algorithm.
 *
 * returns non-zero if packet should be dropped
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames need no decryption handling */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

		/* fall through - TKIP with good TTAK shares the WEP
		 * ICV/MIC check below */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - successful WEP/TKIP decryption is flagged
		 * the same way as CCMP */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
878
/*
 * iwl_pass_packet_to_mac80211 - hand a received frame up to mac80211
 *
 * Drops the frame if the interface is closed or HW decryption failed;
 * otherwise attaches the RX page as an skb fragment (zero-copy) and
 * passes it to ieee80211_rx().  On success, ownership of rxb->page is
 * transferred to the skb, so rxb->page is set to NULL.
 */
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* small skb: the payload lives in the attached page fragment */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc))) {
		for_each_context(priv, ctx) {
			if (!ctx->last_tx_rejected)
				continue;
			/* only the beacon of our own BSS counts */
			if (compare_ether_addr(hdr->addr3,
					       ctx->active.bssid_addr))
				continue;
			ctx->last_tx_rejected = false;
			iwl_wake_any_queue(priv, ctx);
		}
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* page now owned by the skb */
	rxb->page = NULL;
}
936
937 static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
938 {
939 u32 decrypt_out = 0;
940
941 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
942 RX_RES_STATUS_STATION_FOUND)
943 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
944 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
945
946 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
947
948 /* packet was not encrypted */
949 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
950 RX_RES_STATUS_SEC_TYPE_NONE)
951 return decrypt_out;
952
953 /* packet was encrypted with unknown alg */
954 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
955 RX_RES_STATUS_SEC_TYPE_ERR)
956 return decrypt_out;
957
958 /* decryption was not done in HW */
959 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
960 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
961 return decrypt_out;
962
963 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
964
965 case RX_RES_STATUS_SEC_TYPE_CCMP:
966 /* alg is CCM: check MIC only */
967 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
968 /* Bad MIC */
969 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
970 else
971 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
972
973 break;
974
975 case RX_RES_STATUS_SEC_TYPE_TKIP:
976 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
977 /* Bad TTAK */
978 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
979 break;
980 }
981 /* fall through if TTAK OK */
982 default:
983 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
984 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
985 else
986 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
987 break;
988 }
989
990 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
991 decrypt_in, decrypt_out);
992
993 return decrypt_out;
994 }
995
996 /* Called for REPLY_RX (legacy ABG frames), or
997 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
998 static void iwl_rx_reply_rx(struct iwl_priv *priv,
999 struct iwl_rx_mem_buffer *rxb)
1000 {
1001 struct ieee80211_hdr *header;
1002 struct ieee80211_rx_status rx_status;
1003 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1004 struct iwl_rx_phy_res *phy_res;
1005 __le32 rx_pkt_status;
1006 struct iwl_rx_mpdu_res_start *amsdu;
1007 u32 len;
1008 u32 ampdu_status;
1009 u32 rate_n_flags;
1010
1011 /**
1012 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
1013 * REPLY_RX: physical layer info is in this buffer
1014 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1015 * command and cached in priv->last_phy_res
1016 *
1017 * Here we set up local variables depending on which command is
1018 * received.
1019 */
1020 if (pkt->hdr.cmd == REPLY_RX) {
1021 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1022 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1023 + phy_res->cfg_phy_cnt);
1024
1025 len = le16_to_cpu(phy_res->byte_count);
1026 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1027 phy_res->cfg_phy_cnt + len);
1028 ampdu_status = le32_to_cpu(rx_pkt_status);
1029 } else {
1030 if (!priv->_agn.last_phy_res_valid) {
1031 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1032 return;
1033 }
1034 phy_res = &priv->_agn.last_phy_res;
1035 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1036 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1037 len = le16_to_cpu(amsdu->byte_count);
1038 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1039 ampdu_status = iwl_translate_rx_status(priv,
1040 le32_to_cpu(rx_pkt_status));
1041 }
1042
1043 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1044 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1045 phy_res->cfg_phy_cnt);
1046 return;
1047 }
1048
1049 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1050 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1051 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1052 le32_to_cpu(rx_pkt_status));
1053 return;
1054 }
1055
1056 /* This will be used in several places later */
1057 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1058
1059 /* rx_status carries information about the packet to mac80211 */
1060 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1061 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1062 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1063 rx_status.freq =
1064 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1065 rx_status.band);
1066 rx_status.rate_idx =
1067 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1068 rx_status.flag = 0;
1069
1070 /* TSF isn't reliable. In order to allow smooth user experience,
1071 * this W/A doesn't propagate it to the mac80211 */
1072 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
1073
1074 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1075
1076 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1077 rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
1078
1079 iwl_dbg_log_rx_data_frame(priv, len, header);
1080 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1081 rx_status.signal, (unsigned long long)rx_status.mactime);
1082
1083 /*
1084 * "antenna number"
1085 *
1086 * It seems that the antenna field in the phy flags value
1087 * is actually a bit field. This is undefined by radiotap,
1088 * it wants an actual antenna number but I always get "7"
1089 * for most legacy frames I receive indicating that the
1090 * same frame was received on all three RX chains.
1091 *
1092 * I think this field should be removed in favor of a
1093 * new 802.11n radiotap field "RX chains" that is defined
1094 * as a bitmask.
1095 */
1096 rx_status.antenna =
1097 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1098 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1099
1100 /* set the preamble flag if appropriate */
1101 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1102 rx_status.flag |= RX_FLAG_SHORTPRE;
1103
1104 /* Set up the HT phy flags */
1105 if (rate_n_flags & RATE_MCS_HT_MSK)
1106 rx_status.flag |= RX_FLAG_HT;
1107 if (rate_n_flags & RATE_MCS_HT40_MSK)
1108 rx_status.flag |= RX_FLAG_40MHZ;
1109 if (rate_n_flags & RATE_MCS_SGI_MSK)
1110 rx_status.flag |= RX_FLAG_SHORT_GI;
1111
1112 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1113 rxb, &rx_status);
1114 }
1115
1116 /**
1117 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1118 *
1119 * Setup the RX handlers for each of the reply types sent from the uCode
1120 * to the host.
1121 */
1122 void iwl_setup_rx_handlers(struct iwl_priv *priv)
1123 {
1124 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
1125
1126 handlers = priv->rx_handlers;
1127
1128 handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
1129 handlers[REPLY_ERROR] = iwl_rx_reply_error;
1130 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1131 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
1132 handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
1133 handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
1134 handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
1135
1136 /*
1137 * The same handler is used for both the REPLY to a discrete
1138 * statistics request from the host as well as for the periodic
1139 * statistics notifications (after received beacons) from the uCode.
1140 */
1141 handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
1142 handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
1143
1144 iwl_setup_rx_scan_handlers(priv);
1145
1146 handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
1147 handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
1148
1149 /* Rx handlers */
1150 handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
1151 handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
1152
1153 /* block ack */
1154 handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
1155
1156 /* Set up hardware specific Rx handlers */
1157 priv->cfg->ops->lib->rx_handler_setup(priv);
1158 }
This page took 0.08046 seconds and 5 git commands to generate.