Commit | Line | Data |
---|---|---|
a55360e4 TW |
1 | /****************************************************************************** |
2 | * | |
1f447808 | 3 | * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. |
a55360e4 TW |
4 | * |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
759ef89f | 25 | * Intel Linux Wireless <ilw@linux.intel.com> |
a55360e4 TW |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
27 | * | |
28 | *****************************************************************************/ | |
29 | ||
1781a07f | 30 | #include <linux/etherdevice.h> |
a55360e4 | 31 | #include <net/mac80211.h> |
a05ffd39 | 32 | #include <asm/unaligned.h> |
a55360e4 TW |
33 | #include "iwl-eeprom.h" |
34 | #include "iwl-dev.h" | |
35 | #include "iwl-core.h" | |
36 | #include "iwl-sta.h" | |
37 | #include "iwl-io.h" | |
c1354754 | 38 | #include "iwl-calib.h" |
a55360e4 TW |
39 | #include "iwl-helpers.h" |
40 | /************************** RX-FUNCTIONS ****************************/ | |
41 | /* | |
42 | * Rx theory of operation | |
43 | * | |
44 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | |
45 | * each of which points to Receive Buffers to be filled by the NIC. These get |
46 | * used not only for Rx frames, but for any command response or notification | |
47 | * from the NIC. The driver and NIC manage the Rx buffers by means | |
48 | * of indexes into the circular buffer. | |
49 | * | |
50 | * Rx Queue Indexes | |
51 | * The host/firmware share two index registers for managing the Rx buffers. | |
52 | * | |
53 | * The READ index maps to the first position that the firmware may be writing | |
54 | * to -- the driver can read up to (but not including) this position and get | |
55 | * good data. | |
56 | * The READ index is managed by the firmware once the card is enabled. | |
57 | * | |
58 | * The WRITE index maps to the last position the driver has read from -- the | |
59 | * position preceding WRITE is the last slot the firmware can place a packet. | |
60 | * | |
61 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | |
62 | * WRITE = READ. | |
63 | * | |
64 | * During initialization, the host sets up the READ queue position to the first | |
65 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | |
66 | * | |
67 | * When the firmware places a packet in a buffer, it will advance the READ index | |
68 | * and fire the RX interrupt. The driver can then query the READ index and | |
69 | * process as many packets as possible, moving the WRITE index forward as it | |
70 | * resets the Rx queue buffers with new memory. | |
71 | * | |
72 | * The management in the driver is as follows: | |
73 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | |
74 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | |
75 | * to replenish the iwl->rxq->rx_free. | |
76 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | |
77 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | |
78 | * 'processed' and 'read' driver indexes as well) | |
79 | * + A received packet is processed and handed to the kernel network stack, | |
80 | * detached from the iwl->rxq. The driver 'processed' index is updated. | |
81 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | |
82 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | |
83 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | |
84 | * were enough free buffers and RX_STALLED is set it is cleared. | |
85 | * | |
86 | * | |
87 | * Driver sequence: | |
88 | * | |
89 | * iwl_rx_queue_alloc() Allocates rx_free | |
90 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | |
91 | * iwl_rx_queue_restock | |
92 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | |
93 | * queue, updates firmware pointers, and updates | |
94 | * the WRITE index. If insufficient rx_free buffers | |
95 | * are available, schedules iwl_rx_replenish | |
96 | * | |
97 | * -- enable interrupts -- | |
98 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | |
99 | * READ INDEX, detaching the SKB from the pool. | |
100 | * Moves the packet buffer from queue to rx_used. | |
101 | * Calls iwl_rx_queue_restock to refill any empty | |
102 | * slots. | |
103 | * ... | |
104 | * | |
105 | */ | |
106 | ||
107 | /** | |
108 | * iwl_rx_queue_space - Return number of free slots available in queue. | |
109 | */ | |
110 | int iwl_rx_queue_space(const struct iwl_rx_queue *q) | |
111 | { | |
112 | int s = q->read - q->write; | |
113 | if (s <= 0) | |
114 | s += RX_QUEUE_SIZE; | |
115 | /* keep some buffer to not confuse full and empty queue */ | |
116 | s -= 2; | |
117 | if (s < 0) | |
118 | s = 0; | |
119 | return s; | |
120 | } | |
121 | EXPORT_SYMBOL(iwl_rx_queue_space); | |
122 | ||
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has restocked Rx buffers by writing
 * the (8-aligned) write index to the device's Rx write-pointer register.
 * Does nothing unless q->need_update is set.  Runs under q->lock with
 * interrupts disabled so it is safe from both tasklet and process context.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			/* Device is asleep: request wakeup and bail without
			 * clearing need_update, so the write is retried once
			 * the device is awake. */
			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				      reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		/* Device expects the write index rounded down to a
		 * multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
a55360e4 TW |
165 | |
166 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | |
167 | { | |
168 | struct iwl_rx_queue *rxq = &priv->rxq; | |
f36d04ab | 169 | struct device *dev = &priv->pci_dev->dev; |
a55360e4 TW |
170 | int i; |
171 | ||
172 | spin_lock_init(&rxq->lock); | |
173 | INIT_LIST_HEAD(&rxq->rx_free); | |
174 | INIT_LIST_HEAD(&rxq->rx_used); | |
175 | ||
176 | /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ | |
f36d04ab SG |
177 | rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr, |
178 | GFP_KERNEL); | |
a55360e4 | 179 | if (!rxq->bd) |
8d86422a WT |
180 | goto err_bd; |
181 | ||
f36d04ab SG |
182 | rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), |
183 | &rxq->rb_stts_dma, GFP_KERNEL); | |
8d86422a WT |
184 | if (!rxq->rb_stts) |
185 | goto err_rb; | |
a55360e4 TW |
186 | |
187 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | |
188 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | |
189 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | |
190 | ||
191 | /* Set us so that we have processed and used all buffers, but have | |
192 | * not restocked the Rx queue with fresh buffers */ | |
193 | rxq->read = rxq->write = 0; | |
4752c93c | 194 | rxq->write_actual = 0; |
a55360e4 TW |
195 | rxq->free_count = 0; |
196 | rxq->need_update = 0; | |
197 | return 0; | |
8d86422a WT |
198 | |
199 | err_rb: | |
f36d04ab SG |
200 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, |
201 | rxq->dma_addr); | |
8d86422a WT |
202 | err_bd: |
203 | return -ENOMEM; | |
a55360e4 TW |
204 | } |
205 | EXPORT_SYMBOL(iwl_rx_queue_alloc); | |
206 | ||
c1354754 TW |
207 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, |
208 | struct iwl_rx_mem_buffer *rxb) | |
209 | ||
210 | { | |
2f301227 | 211 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
2aa6ab86 | 212 | struct iwl_missed_beacon_notif *missed_beacon; |
c1354754 TW |
213 | |
214 | missed_beacon = &pkt->u.missed_beacon; | |
a13d276f WYG |
215 | if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > |
216 | priv->missed_beacon_threshold) { | |
e1623446 | 217 | IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n", |
a13d276f | 218 | le32_to_cpu(missed_beacon->consecutive_missed_beacons), |
c1354754 TW |
219 | le32_to_cpu(missed_beacon->total_missed_becons), |
220 | le32_to_cpu(missed_beacon->num_recvd_beacons), | |
221 | le32_to_cpu(missed_beacon->num_expected_beacons)); | |
222 | if (!test_bit(STATUS_SCANNING, &priv->status)) | |
223 | iwl_init_sensitivity(priv); | |
224 | } | |
c1354754 TW |
225 | } |
226 | EXPORT_SYMBOL(iwl_rx_missed_beacon_notif); | |
8f91aecb | 227 | |
81963d68 RC |
228 | void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, |
229 | struct iwl_rx_mem_buffer *rxb) | |
230 | { | |
231 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | |
232 | struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); | |
233 | ||
234 | if (!report->state) { | |
235 | IWL_DEBUG_11H(priv, | |
236 | "Spectrum Measure Notification: Start\n"); | |
237 | return; | |
238 | } | |
239 | ||
240 | memcpy(&priv->measure_report, report, sizeof(*report)); | |
241 | priv->measurement_status |= MEASUREMENT_READY; | |
242 | } | |
243 | EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); | |
244 | ||
245 | ||
8f91aecb EG |
246 | |
247 | /* Calculate noise level, based on measurements during network silence just | |
248 | * before arriving beacon. This measurement can be done only if we know | |
249 | * exactly when to expect beacons, therefore only when we're associated. */ | |
250 | static void iwl_rx_calc_noise(struct iwl_priv *priv) | |
251 | { | |
252 | struct statistics_rx_non_phy *rx_info | |
a2064b7a | 253 | = &(priv->_agn.statistics.rx.general); |
8f91aecb EG |
254 | int num_active_rx = 0; |
255 | int total_silence = 0; | |
256 | int bcn_silence_a = | |
257 | le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; | |
258 | int bcn_silence_b = | |
259 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; | |
260 | int bcn_silence_c = | |
261 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; | |
ed1b6e99 | 262 | int last_rx_noise; |
8f91aecb EG |
263 | |
264 | if (bcn_silence_a) { | |
265 | total_silence += bcn_silence_a; | |
266 | num_active_rx++; | |
267 | } | |
268 | if (bcn_silence_b) { | |
269 | total_silence += bcn_silence_b; | |
270 | num_active_rx++; | |
271 | } | |
272 | if (bcn_silence_c) { | |
273 | total_silence += bcn_silence_c; | |
274 | num_active_rx++; | |
275 | } | |
276 | ||
277 | /* Average among active antennas */ | |
278 | if (num_active_rx) | |
ed1b6e99 | 279 | last_rx_noise = (total_silence / num_active_rx) - 107; |
8f91aecb | 280 | else |
ed1b6e99 | 281 | last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; |
8f91aecb | 282 | |
e1623446 | 283 | IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", |
8f91aecb | 284 | bcn_silence_a, bcn_silence_b, bcn_silence_c, |
ed1b6e99 | 285 | last_rx_noise); |
8f91aecb EG |
286 | } |
287 | ||
d73e4923 | 288 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
92a35bda WYG |
289 | /* |
290 | * based on the assumption of all statistics counter are in DWORD | |
291 | * FIXME: This function is for debugging, do not deal with | |
292 | * the case of counters roll-over. | |
293 | */ | |
294 | static void iwl_accumulative_statistics(struct iwl_priv *priv, | |
295 | __le32 *stats) | |
296 | { | |
297 | int i; | |
298 | __le32 *prev_stats; | |
299 | u32 *accum_stats; | |
e3ef2164 | 300 | u32 *delta, *max_delta; |
92a35bda | 301 | |
a2064b7a WYG |
302 | prev_stats = (__le32 *)&priv->_agn.statistics; |
303 | accum_stats = (u32 *)&priv->_agn.accum_statistics; | |
304 | delta = (u32 *)&priv->_agn.delta_statistics; | |
305 | max_delta = (u32 *)&priv->_agn.max_delta; | |
92a35bda WYG |
306 | |
307 | for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics); | |
e3ef2164 WYG |
308 | i += sizeof(__le32), stats++, prev_stats++, delta++, |
309 | max_delta++, accum_stats++) { | |
310 | if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { | |
311 | *delta = (le32_to_cpu(*stats) - | |
92a35bda | 312 | le32_to_cpu(*prev_stats)); |
e3ef2164 WYG |
313 | *accum_stats += *delta; |
314 | if (*delta > *max_delta) | |
315 | *max_delta = *delta; | |
316 | } | |
317 | } | |
92a35bda WYG |
318 | |
319 | /* reset accumulative statistics for "no-counter" type statistics */ | |
a2064b7a WYG |
320 | priv->_agn.accum_statistics.general.temperature = |
321 | priv->_agn.statistics.general.temperature; | |
322 | priv->_agn.accum_statistics.general.temperature_m = | |
323 | priv->_agn.statistics.general.temperature_m; | |
324 | priv->_agn.accum_statistics.general.ttl_timestamp = | |
325 | priv->_agn.statistics.general.ttl_timestamp; | |
326 | priv->_agn.accum_statistics.tx.tx_power.ant_a = | |
327 | priv->_agn.statistics.tx.tx_power.ant_a; | |
328 | priv->_agn.accum_statistics.tx.tx_power.ant_b = | |
329 | priv->_agn.statistics.tx.tx_power.ant_b; | |
330 | priv->_agn.accum_statistics.tx.tx_power.ant_c = | |
331 | priv->_agn.statistics.tx.tx_power.ant_c; | |
92a35bda WYG |
332 | } |
333 | #endif | |
334 | ||
8f91aecb EG |
335 | #define REG_RECALIB_PERIOD (60) |
336 | ||
fa8f130c WYG |
/**
 * iwl_good_plcp_health - checks for plcp error.
 *
 * When the plcp error is exceeding the thresholds, reset the radio
 * to improve the throughput.
 *
 * Compares the PLCP error counters in the new statistics packet (@pkt)
 * against the previously stored statistics, normalizes the combined
 * OFDM + OFDM-HT delta by the elapsed wall time, and returns false when
 * the per-second error rate exceeds the config threshold
 * (priv->cfg->plcp_delta_threshold).  Returns true otherwise.
 */
bool iwl_good_plcp_health(struct iwl_priv *priv,
			  struct iwl_rx_packet *pkt)
{
	bool rc = true;
	int combined_plcp_delta;
	unsigned int plcp_msec;
	unsigned long plcp_received_jiffies;

	/*
	 * check for plcp_err and trigger radio reset if it exceeds
	 * the plcp error threshold plcp_delta.
	 */
	plcp_received_jiffies = jiffies;
	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
			(long) priv->plcp_jiffies);
	priv->plcp_jiffies = plcp_received_jiffies;
	/*
	 * check to make sure plcp_msec is not 0 to prevent division
	 * by zero.
	 */
	if (plcp_msec) {
		/* delta of each counter since the last statistics packet */
		combined_plcp_delta =
			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
			le32_to_cpu(priv->_agn.statistics.rx.ofdm.plcp_err)) +
			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
			le32_to_cpu(priv->_agn.statistics.rx.ofdm_ht.plcp_err));

		if ((combined_plcp_delta > 0) &&
		    ((combined_plcp_delta * 100) / plcp_msec) >
			priv->cfg->plcp_delta_threshold) {
			/*
			 * if plcp_err exceed the threshold,
			 * the following data is printed in csv format:
			 *    Text: plcp_err exceeded %d,
			 *    Received ofdm.plcp_err,
			 *    Current ofdm.plcp_err,
			 *    Received ofdm_ht.plcp_err,
			 *    Current ofdm_ht.plcp_err,
			 *    combined_plcp_delta,
			 *    plcp_msec
			 */
			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
				"%u, %u, %u, %u, %d, %u mSecs\n",
				priv->cfg->plcp_delta_threshold,
				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
				le32_to_cpu(priv->_agn.statistics.rx.ofdm.plcp_err),
				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
				le32_to_cpu(
					priv->_agn.statistics.rx.ofdm_ht.plcp_err),
				combined_plcp_delta, plcp_msec);
			rc = false;
		}
	}
	return rc;
}
EXPORT_SYMBOL(iwl_good_plcp_health);
399 | ||
a29576a7 | 400 | void iwl_recover_from_statistics(struct iwl_priv *priv, |
fa8f130c WYG |
401 | struct iwl_rx_packet *pkt) |
402 | { | |
403 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
404 | return; | |
405 | if (iwl_is_associated(priv)) { | |
406 | if (priv->cfg->ops->lib->check_ack_health) { | |
407 | if (!priv->cfg->ops->lib->check_ack_health( | |
408 | priv, pkt)) { | |
409 | /* | |
410 | * low ack count detected | |
411 | * restart Firmware | |
412 | */ | |
413 | IWL_ERR(priv, "low ack count detected, " | |
414 | "restart firmware\n"); | |
3d38f173 WYG |
415 | if (!iwl_force_reset(priv, IWL_FW_RESET)) |
416 | return; | |
fa8f130c | 417 | } |
3d38f173 WYG |
418 | } |
419 | if (priv->cfg->ops->lib->check_plcp_health) { | |
fa8f130c WYG |
420 | if (!priv->cfg->ops->lib->check_plcp_health( |
421 | priv, pkt)) { | |
422 | /* | |
423 | * high plcp error detected | |
424 | * reset Radio | |
425 | */ | |
426 | iwl_force_reset(priv, IWL_RF_RESET); | |
427 | } | |
3e4fb5fa TAN |
428 | } |
429 | } | |
beac5498 | 430 | } |
a29576a7 | 431 | EXPORT_SYMBOL(iwl_recover_from_statistics); |
beac5498 WYG |
432 | |
/*
 * Handle a periodic statistics notification from the uCode: accumulate
 * debugfs counters, run recovery health checks, store the new statistics,
 * re-arm the periodic statistics timer, and kick off noise calibration
 * and (on relevant changes) a temperature update.
 */
void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);


	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->_agn.statistics),
		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);

	/* temperature or HT40-mode flag changed since the last packet? */
	change = ((priv->_agn.statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->_agn.statistics.flag &
		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
	/* must run before the old statistics are overwritten below,
	 * since the health checks compare new against old counters */
	iwl_recover_from_statistics(priv, pkt);

	memcpy(&priv->_agn.statistics, &pkt->u.stats,
	       sizeof(priv->_agn.statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
EXPORT_SYMBOL(iwl_rx_statistics);
1781a07f | 476 | |
ef8d5529 WYG |
/*
 * Handle a REPLY_STATISTICS_CMD response.  If the uCode indicates the
 * on-device counters were cleared, zero the driver's debugfs accumulation
 * state to match, then process the packet as a normal statistics
 * notification.
 */
void iwl_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* keep accumulated/delta/max state in sync with the
		 * freshly-cleared uCode counters */
		memset(&priv->_agn.accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_agn.delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_agn.max_delta, 0,
			sizeof(struct iwl_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl_rx_statistics(priv, rxb);
}
EXPORT_SYMBOL(iwl_reply_statistics);
496 | ||
1781a07f EG |
/*
 * returns non-zero if packet should be dropped
 *
 * Examines the hardware decryption result for a protected frame and
 * either marks it as decrypted for mac80211 (RX_FLAG_DECRYPTED), leaves
 * it for software decryption, or drops it (-1) when the in-place
 * decryption destroyed the payload.  Frames are passed through untouched
 * when decryption is disabled in the RXON filter or the frame is not
 * protected.
 *
 * NOTE: the TKIP->WEP and WEP->CCMP case fallthroughs below are
 * intentional; see the comments at each one.
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
			   struct ieee80211_hdr *hdr,
			   u32 decrypt_res,
			   struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

		/* fall through - TKIP shares the ICV/MIC check with WEP */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - good ICV/MIC is handled like CCMP */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);