iwlwifi: cleanup iwl_recover_from_statistics
[deliverable/linux.git] / drivers / net / wireless / iwlwifi / iwl-rx.c
CommitLineData
a55360e4
TW
1/******************************************************************************
2 *
1f447808 3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
a55360e4
TW
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
759ef89f 25 * Intel Linux Wireless <ilw@linux.intel.com>
a55360e4
TW
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
1781a07f 30#include <linux/etherdevice.h>
5a0e3ad6 31#include <linux/slab.h>
a55360e4 32#include <net/mac80211.h>
a05ffd39 33#include <asm/unaligned.h>
a55360e4
TW
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_rx_queue_space);
122
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has refilled the Rx ring by
 * writing q->write (rounded down to a multiple of 8, as the device
 * requires) into the Rx write-pointer register.  No-op unless
 * q->need_update is set.  Runs under q->lock with IRQs disabled.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/*
				 * Device is asleep: request a wakeup and
				 * leave need_update set so the pointer is
				 * written on a later call once it is awake.
				 */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
a55360e4
TW
174
175int iwl_rx_queue_alloc(struct iwl_priv *priv)
176{
177 struct iwl_rx_queue *rxq = &priv->rxq;
f36d04ab 178 struct device *dev = &priv->pci_dev->dev;
a55360e4
TW
179 int i;
180
181 spin_lock_init(&rxq->lock);
182 INIT_LIST_HEAD(&rxq->rx_free);
183 INIT_LIST_HEAD(&rxq->rx_used);
184
185 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
d5b25c90 186 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
f36d04ab 187 GFP_KERNEL);
a55360e4 188 if (!rxq->bd)
8d86422a
WT
189 goto err_bd;
190
f36d04ab
SG
191 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
192 &rxq->rb_stts_dma, GFP_KERNEL);
8d86422a
WT
193 if (!rxq->rb_stts)
194 goto err_rb;
a55360e4
TW
195
196 /* Fill the rx_used queue with _all_ of the Rx buffers */
197 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
198 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
199
200 /* Set us so that we have processed and used all buffers, but have
201 * not restocked the Rx queue with fresh buffers */
202 rxq->read = rxq->write = 0;
4752c93c 203 rxq->write_actual = 0;
a55360e4
TW
204 rxq->free_count = 0;
205 rxq->need_update = 0;
206 return 0;
8d86422a
WT
207
208err_rb:
f36d04ab 209 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
d5b25c90 210 rxq->bd_dma);
8d86422a
WT
211err_bd:
212 return -ENOMEM;
a55360e4
TW
213}
214EXPORT_SYMBOL(iwl_rx_queue_alloc);
215
8f91aecb 216
81963d68
RC
217void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
218 struct iwl_rx_mem_buffer *rxb)
219{
220 struct iwl_rx_packet *pkt = rxb_addr(rxb);
221 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
222
223 if (!report->state) {
224 IWL_DEBUG_11H(priv,
225 "Spectrum Measure Notification: Start\n");
226 return;
227 }
228
229 memcpy(&priv->measure_report, report, sizeof(*report));
230 priv->measurement_status |= MEASUREMENT_READY;
231}
232EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
233
a29576a7 234void iwl_recover_from_statistics(struct iwl_priv *priv,
fa8f130c
WYG
235 struct iwl_rx_packet *pkt)
236{
ca3d9389
SG
237 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
238 !iwl_is_any_associated(priv))
fa8f130c 239 return;
ca3d9389
SG
240
241 if (priv->cfg->ops->lib->check_ack_health &&
242 !priv->cfg->ops->lib->check_ack_health(priv, pkt)) {
243 IWL_ERR(priv, "low ack count detected, restart firmware\n");
244 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
245 return;
3e4fb5fa 246 }
ca3d9389
SG
247
248 if (priv->cfg->ops->lib->check_plcp_health &&
249 !priv->cfg->ops->lib->check_plcp_health(priv, pkt))
250 iwl_force_reset(priv, IWL_RF_RESET, false);
beac5498 251}
a29576a7 252EXPORT_SYMBOL(iwl_recover_from_statistics);
beac5498 253
1781a07f
EG
/*
 * iwl_set_decrypted_flag - interpret the uCode decryption result for a frame
 *
 * Inspects decrypt_res and, when hardware decryption succeeded, sets
 * RX_FLAG_DECRYPTED in the mac80211 rx status.
 *
 * returns non-zero if packet should be dropped
 *
 * NOTE: the switch below relies on deliberate fallthrough:
 * TKIP with a bad phase-1 key falls through to the WEP/CCMP checks,
 * and WEP falls through to the CCMP "decrypt OK" check.
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
			   struct ieee80211_hdr *hdr,
			   u32 decrypt_res,
			   struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
						RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames need no decryption handling */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);
This page took 0.608402 seconds and 5 git commands to generate.