Commit | Line | Data |
---|---|---|
e04ed0a5 WYG |
1 | /****************************************************************************** |
2 | * | |
3 | * GPL LICENSE SUMMARY | |
4 | * | |
5 | * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of version 2 of the GNU General Public License as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, but | |
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | |
19 | * USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution | |
22 | * in the file called LICENSE.GPL. | |
23 | * | |
24 | * Contact Information: | |
25 | * Intel Linux Wireless <ilw@linux.intel.com> | |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
27 | * | |
28 | *****************************************************************************/ | |
8d801080 | 29 | #include <linux/etherdevice.h> |
e04ed0a5 WYG |
30 | #include <linux/kernel.h> |
31 | #include <linux/module.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/sched.h> | |
34 | ||
35 | #include "iwl-dev.h" | |
36 | #include "iwl-core.h" | |
37 | #include "iwl-io.h" | |
38 | #include "iwl-helpers.h" | |
39 | #include "iwl-agn-hw.h" | |
40 | #include "iwl-agn.h" | |
1fa61b2e | 41 | #include "iwl-sta.h" |
e04ed0a5 WYG |
42 | |
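/*
 * Descriptive note (added, not in the original source): the scheduler SSN
 * below is read from the 32-bit word that follows the frame_count per-frame
 * agg status entries after tx_resp->status, then masked with MAX_SN to get
 * a valid sequence number.
 */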
43 | static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) | |
44 | { | |
45 | return le32_to_cpup((__le32 *)&tx_resp->status + | |
46 | tx_resp->frame_count) & MAX_SN; | |
47 | } | |
48 | ||
49 | static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, | |
50 | struct iwl_ht_agg *agg, | |
51 | struct iwl5000_tx_resp *tx_resp, | |
52 | int txq_id, u16 start_idx) | |
53 | { | |
54 | u16 status; | |
55 | struct agg_tx_status *frame_status = &tx_resp->status; | |
56 | struct ieee80211_tx_info *info = NULL; | |
57 | struct ieee80211_hdr *hdr = NULL; | |
58 | u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | |
59 | int i, sh, idx; | |
60 | u16 seq; | |
61 | ||
62 | if (agg->wait_for_ba) | |
63 | IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); | |
64 | ||
65 | agg->frame_count = tx_resp->frame_count; | |
66 | agg->start_idx = start_idx; | |
67 | agg->rate_n_flags = rate_n_flags; | |
68 | agg->bitmap = 0; | |
69 | ||
70 | /* # frames attempted by Tx command */ | |
71 | if (agg->frame_count == 1) { | |
72 | /* Only one frame was attempted; no block-ack will arrive */ | |
73 | status = le16_to_cpu(frame_status[0].status); | |
74 | idx = start_idx; | |
75 | ||
76 | /* FIXME: code repetition */ | |
77 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", | |
78 | agg->frame_count, agg->start_idx, idx); | |
79 | ||
ff0d91c3 | 80 | info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); |
e04ed0a5 WYG |
81 | info->status.rates[0].count = tx_resp->failure_frame + 1; |
82 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | |
83 | info->flags |= iwl_tx_status_to_mac80211(status); | |
8d801080 | 84 | iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); |
e04ed0a5 WYG |
85 | |
86 | /* FIXME: code repetition end */ | |
87 | ||
88 | IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", | |
89 | status & 0xff, tx_resp->failure_frame); | |
90 | IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); | |
91 | ||
92 | agg->wait_for_ba = 0; | |
93 | } else { | |
94 | /* Two or more frames were attempted; expect block-ack */ | |
95 | u64 bitmap = 0; | |
f668da2f DH |
96 | |
97 | /* | |
98 | * Start is the lowest frame sent. It may not be the first | |
99 | * frame in the batch; we figure this out dynamically during | |
100 | * the following loop. | |
101 | */ | |
e04ed0a5 WYG |
102 | int start = agg->start_idx; |
103 | ||
104 | /* Construct bit-map of pending frames within Tx window */ | |
105 | for (i = 0; i < agg->frame_count; i++) { | |
106 | u16 sc; | |
107 | status = le16_to_cpu(frame_status[i].status); | |
108 | seq = le16_to_cpu(frame_status[i].sequence); | |
109 | idx = SEQ_TO_INDEX(seq); | |
110 | txq_id = SEQ_TO_QUEUE(seq); | |
111 | ||
112 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | |
113 | AGG_TX_STATE_ABORT_MSK)) | |
114 | continue; | |
115 | ||
116 | IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", | |
117 | agg->frame_count, txq_id, idx); | |
118 | ||
119 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); | |
120 | if (!hdr) { | |
121 | IWL_ERR(priv, | |
122 | "BUG_ON idx doesn't point to valid skb" | |
123 | " idx=%d, txq_id=%d\n", idx, txq_id); | |
124 | return -1; | |
125 | } | |
126 | ||
127 | sc = le16_to_cpu(hdr->seq_ctrl); | |
128 | if (idx != (SEQ_TO_SN(sc) & 0xff)) { | |
129 | IWL_ERR(priv, | |
130 | "BUG_ON idx doesn't match seq control" | |
131 | " idx=%d, seq_idx=%d, seq=%d\n", | |
132 | idx, SEQ_TO_SN(sc), | |
133 | hdr->seq_ctrl); | |
134 | return -1; | |
135 | } | |
136 | ||
137 | IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", | |
138 | i, idx, SEQ_TO_SN(sc)); | |
139 | ||
f668da2f DH |
140 | /* |
141 | * sh -> how many frames ahead of the starting frame is | |
142 | * the current one? | |
143 | * | |
144 | * Note that all frames sent in the batch must be in a | |
145 | * 64-frame window, so this number should be in [0,63]. | |
146 | * If outside of this window, then we've found a new | |
147 | * "first" frame in the batch and need to change start. | |
148 | */ | |
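/*
 * Illustrative example (added, not part of the original source): with
 * start = 10 and idx = 70, sh = 60 and bit 60 of the bitmap is set below;
 * with start = 2 and idx = 250, idx actually lies behind start across the
 * 256-entry wrap, so start becomes 250 and the existing bitmap is shifted
 * left by 0x100 - 250 + 2 = 8 before bit 0 is set.
 */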
e04ed0a5 | 149 | sh = idx - start; |
f668da2f DH |
150 | |
151 | /* | |
152 | * If >= 64, out of window. start must be at the front | |
153 | * of the circular buffer, idx must be near the end of | |
154 | * the buffer, and idx is the new "first" frame. Shift | |
155 | * the indices around. | |
156 | */ | |
157 | if (sh >= 64) { | |
158 | /* Shift bitmap by start - idx, wrapped */ | |
159 | sh = 0x100 - idx + start; | |
e04ed0a5 | 160 | bitmap = bitmap << sh; |
f668da2f | 161 | /* Now idx is the new start so sh = 0 */ |
e04ed0a5 WYG |
162 | sh = 0; |
163 | start = idx; | |
f668da2f DH |
164 | /* |
165 | * If <= -64 then wraps the 256-pkt circular buffer | |
166 | * (e.g., start = 255 and idx = 0, sh should be 1) | |
167 | */ | |
168 | } else if (sh <= -64) { | |
169 | sh = 0x100 - start + idx; | |
170 | /* | |
171 | * If < 0 but > -64, out of window. idx is before start | |
172 | * but not wrapped. Shift the indices around. | |
173 | */ | |
174 | } else if (sh < 0) { | |
175 | /* Shift by how far start is ahead of idx */ | |
e04ed0a5 | 176 | sh = start - idx; |
e04ed0a5 | 177 | bitmap = bitmap << sh; |
f668da2f DH |
178 | /* Now idx is the new start so sh = 0 */ |
179 | start = idx; | |
e04ed0a5 WYG |
180 | sh = 0; |
181 | } | |
f668da2f | 182 | /* Sequence number start + sh was sent in this batch */ |
e04ed0a5 WYG |
183 | bitmap |= 1ULL << sh; |
184 | IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", | |
185 | start, (unsigned long long)bitmap); | |
186 | } | |
187 | ||
f668da2f DH |
188 | /* |
189 | * Store the bitmap and possibly the new start, if we wrapped | |
190 | * the buffer above | |
191 | */ | |
e04ed0a5 WYG |
192 | agg->bitmap = bitmap; |
193 | agg->start_idx = start; | |
194 | IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", | |
195 | agg->frame_count, agg->start_idx, | |
196 | (unsigned long long)agg->bitmap); | |
197 | ||
198 | if (bitmap) | |
199 | agg->wait_for_ba = 1; | |
200 | } | |
201 | return 0; | |
202 | } | |
203 | ||
04569cbe WYG |
204 | void iwl_check_abort_status(struct iwl_priv *priv, |
205 | u8 frame_count, u32 status) | |
206 | { | |
207 | if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { | |
65550636 WYG |
208 | IWL_ERR(priv, "Tx flush command to flush out all frames\n"); |
209 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
210 | queue_work(priv->workqueue, &priv->tx_flush); | |
04569cbe WYG |
211 | } |
212 | } | |
213 | ||
e04ed0a5 WYG |
214 | static void iwlagn_rx_reply_tx(struct iwl_priv *priv, |
215 | struct iwl_rx_mem_buffer *rxb) | |
216 | { | |
217 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | |
218 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | |
219 | int txq_id = SEQ_TO_QUEUE(sequence); | |
220 | int index = SEQ_TO_INDEX(sequence); | |
221 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | |
222 | struct ieee80211_tx_info *info; | |
223 | struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | |
224 | u32 status = le16_to_cpu(tx_resp->status.status); | |
225 | int tid; | |
226 | int sta_id; | |
227 | int freed; | |
9c5ac091 | 228 | unsigned long flags; |
e04ed0a5 WYG |
229 | |
230 | if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { | |
231 | IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " | |
232 | "is out of range [0-%d] %d %d\n", txq_id, | |
233 | index, txq->q.n_bd, txq->q.write_ptr, | |
234 | txq->q.read_ptr); | |
235 | return; | |
236 | } | |
237 | ||
ff0d91c3 | 238 | info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); |
e04ed0a5 WYG |
239 | memset(&info->status, 0, sizeof(info->status)); |
240 | ||
241 | tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; | |
242 | sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; | |
243 | ||
9c5ac091 | 244 | spin_lock_irqsave(&priv->sta_lock, flags); |
e04ed0a5 WYG |
245 | if (txq->sched_retry) { |
246 | const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp); | |
9c5ac091 | 247 | struct iwl_ht_agg *agg; |
e04ed0a5 WYG |
248 | |
249 | agg = &priv->stations[sta_id].tid[tid].agg; | |
250 | ||
251 | iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); | |
252 | ||
253 | /* check if BAR is needed */ | |
254 | if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) | |
255 | info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | |
256 | ||
257 | if (txq->q.read_ptr != (scd_ssn & 0xff)) { | |
258 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | |
259 | IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim " | |
260 | "scd_ssn=%d idx=%d txq=%d swq=%d\n", | |
261 | scd_ssn, index, txq_id, txq->swq_id); |
262 | ||
74bcdb33 | 263 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); |
e04ed0a5 WYG |
264 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
265 | ||
266 | if (priv->mac80211_registered && | |
267 | (iwl_queue_space(&txq->q) > txq->q.low_mark) && | |
268 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { | |
269 | if (agg->state == IWL_AGG_OFF) | |
270 | iwl_wake_queue(priv, txq_id); | |
271 | else | |
272 | iwl_wake_queue(priv, txq->swq_id); | |
273 | } | |
274 | } | |
275 | } else { | |
276 | BUG_ON(txq_id != txq->swq_id); | |
277 | ||
278 | info->status.rates[0].count = tx_resp->failure_frame + 1; | |
279 | info->flags |= iwl_tx_status_to_mac80211(status); | |
8d801080 | 280 | iwlagn_hwrate_to_tx_control(priv, |
e04ed0a5 WYG |
281 | le32_to_cpu(tx_resp->rate_n_flags), |
282 | info); | |
283 | ||
284 | IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags " | |
285 | "0x%x retries %d\n", | |
286 | txq_id, | |
287 | iwl_get_tx_fail_reason(status), status, | |
288 | le32_to_cpu(tx_resp->rate_n_flags), | |
289 | tx_resp->failure_frame); | |
290 | ||
74bcdb33 | 291 | freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); |
e04ed0a5 WYG |
292 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); |
293 | ||
294 | if (priv->mac80211_registered && | |
295 | (iwl_queue_space(&txq->q) > txq->q.low_mark)) | |
296 | iwl_wake_queue(priv, txq_id); | |
297 | } | |
298 | ||
74bcdb33 | 299 | iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); |
e04ed0a5 | 300 | |
04569cbe | 301 | iwl_check_abort_status(priv, tx_resp->frame_count, status); |
9c5ac091 | 302 | spin_unlock_irqrestore(&priv->sta_lock, flags); |
e04ed0a5 WYG |
303 | } |
304 | ||
305 | void iwlagn_rx_handler_setup(struct iwl_priv *priv) | |
306 | { | |
307 | /* init calibration handlers */ | |
308 | priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = | |
309 | iwlagn_rx_calib_result; | |
310 | priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = | |
311 | iwlagn_rx_calib_complete; | |
312 | priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; | |
313 | } | |
314 | ||
315 | void iwlagn_setup_deferred_work(struct iwl_priv *priv) | |
316 | { | |
317 | /* in agn, the tx power calibration is done in uCode */ | |
318 | priv->disable_tx_power_cal = 1; | |
319 | } | |
320 | ||
321 | int iwlagn_hw_valid_rtc_data_addr(u32 addr) | |
322 | { | |
323 | return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) && | |
324 | (addr < IWLAGN_RTC_DATA_UPPER_BOUND); | |
325 | } | |
326 | ||
327 | int iwlagn_send_tx_power(struct iwl_priv *priv) | |
328 | { | |
329 | struct iwl5000_tx_power_dbm_cmd tx_power_cmd; | |
330 | u8 tx_ant_cfg_cmd; | |
331 | ||
332 | /* the user limit is in dBm, the command expects half-dBm, so multiply by 2 */ |
333 | tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); | |
334 | ||
335 | if (priv->tx_power_lmt_in_half_dbm && | |
336 | priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) { | |
337 | /* |
338 |  * Newer devices use an enhanced/extended tx power table in |
339 |  * EEPROM, with entries in half-dBm. The driver has to convert |
340 |  * them to dBm before reporting to mac80211, which can lose |
341 |  * 1/2 dBm of resolution. The driver rounds up before |
342 |  * reporting, but the rounded value may then exceed the |
343 |  * regulatory limit by 1/2 dBm. Check for that here: if |
344 |  * "tx_power_user_lmt" is higher than the EEPROM value |
345 |  * (in half-dBm format), lower the tx power based on the |
346 |  * EEPROM limit. |
347 |  */ |
348 | tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; | |
349 | } | |
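/*
 * Worked example (added for illustration): tx_power_user_lmt = 16 dBm gives
 * global_lmt = 32 half-dBm above; if the EEPROM enhanced limit were
 * 31 half-dBm (15.5 dBm), the branch above would lower global_lmt to 31.
 */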
350 | tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; | |
351 | tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; | |
352 | ||
353 | if (IWL_UCODE_API(priv->ucode_ver) == 1) | |
354 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; | |
355 | else | |
356 | tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; | |
357 | ||
358 | return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, | |
359 | sizeof(tx_power_cmd), &tx_power_cmd, | |
360 | NULL); | |
361 | } | |
362 | ||
363 | void iwlagn_temperature(struct iwl_priv *priv) | |
364 | { | |
365 | /* store temperature from statistics (in Celsius) */ | |
f3aebeee | 366 | priv->temperature = |
325322ee | 367 | le32_to_cpu(priv->_agn.statistics.general.common.temperature); |
e04ed0a5 WYG |
368 | iwl_tt_handler(priv); |
369 | } | |
370 | ||
371 | u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) | |
372 | { | |
373 | struct iwl_eeprom_calib_hdr { | |
374 | u8 version; | |
375 | u8 pa_type; | |
376 | u16 voltage; | |
377 | } *hdr; | |
378 | ||
379 | hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, | |
7944f8e4 | 380 | EEPROM_CALIB_ALL); |
e04ed0a5 WYG |
381 | return hdr->version; |
382 | ||
383 | } | |
384 | ||
385 | /* | |
386 | * EEPROM | |
387 | */ | |
388 | static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) | |
389 | { | |
390 | u16 offset = 0; | |
391 | ||
392 | if ((address & INDIRECT_ADDRESS) == 0) | |
393 | return address; | |
394 | ||
395 | switch (address & INDIRECT_TYPE_MSK) { | |
396 | case INDIRECT_HOST: | |
7944f8e4 | 397 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); |
e04ed0a5 WYG |
398 | break; |
399 | case INDIRECT_GENERAL: | |
7944f8e4 | 400 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); |
e04ed0a5 WYG |
401 | break; |
402 | case INDIRECT_REGULATORY: | |
7944f8e4 | 403 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); |
e04ed0a5 WYG |
404 | break; |
405 | case INDIRECT_CALIBRATION: | |
7944f8e4 | 406 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); |
e04ed0a5 WYG |
407 | break; |
408 | case INDIRECT_PROCESS_ADJST: | |
7944f8e4 | 409 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); |
e04ed0a5 WYG |
410 | break; |
411 | case INDIRECT_OTHERS: | |
7944f8e4 | 412 | offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); |
e04ed0a5 WYG |
413 | break; |
414 | default: | |
415 | IWL_ERR(priv, "illegal indirect type: 0x%X\n", | |
416 | address & INDIRECT_TYPE_MSK); | |
417 | break; | |
418 | } | |
419 | ||
420 | /* translate the offset from words to bytes */ |
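/* Example (added illustration): a link word of 0x0080 read from the EEPROM
 * contributes a byte offset of 0x100, which is added to the low address
 * bits kept by ADDRESS_MSK. */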
421 | return (address & ADDRESS_MSK) + (offset << 1); | |
422 | } | |
423 | ||
424 | const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, | |
425 | size_t offset) | |
426 | { | |
427 | u32 address = eeprom_indirect_address(priv, offset); | |
428 | BUG_ON(address >= priv->cfg->eeprom_size); | |
429 | return &priv->eeprom[address]; | |
430 | } | |
348ee7cd WYG |
431 | |
432 | struct iwl_mod_params iwlagn_mod_params = { | |
433 | .amsdu_size_8K = 1, | |
434 | .restart_fw = 1, | |
435 | /* the rest are 0 by default */ | |
436 | }; | |
74bcdb33 WYG |
437 | |
438 | void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |
439 | { | |
440 | unsigned long flags; | |
441 | int i; | |
442 | spin_lock_irqsave(&rxq->lock, flags); | |
443 | INIT_LIST_HEAD(&rxq->rx_free); | |
444 | INIT_LIST_HEAD(&rxq->rx_used); | |
445 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | |
446 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | |
447 | /* In the reset function, these buffers may have been allocated | |
448 | * to an SKB, so we need to unmap and free potential storage */ | |
449 | if (rxq->pool[i].page != NULL) { | |
450 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | |
451 | PAGE_SIZE << priv->hw_params.rx_page_order, | |
452 | PCI_DMA_FROMDEVICE); | |
453 | __iwl_free_pages(priv, rxq->pool[i].page); | |
454 | rxq->pool[i].page = NULL; | |
455 | } | |
456 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | |
457 | } | |
458 | ||
6aac74b4 ZY |
459 | for (i = 0; i < RX_QUEUE_SIZE; i++) |
460 | rxq->queue[i] = NULL; | |
461 | ||
74bcdb33 WYG |
462 | /* Reset the indices so that all buffers look processed and used, but the |
463 |  * Rx queue has not yet been restocked with fresh buffers */ |
464 | rxq->read = rxq->write = 0; | |
465 | rxq->write_actual = 0; | |
466 | rxq->free_count = 0; | |
467 | spin_unlock_irqrestore(&rxq->lock, flags); | |
468 | } | |
469 | ||
470 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |
471 | { | |
472 | u32 rb_size; | |
473 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | |
474 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | |
475 | ||
476 | if (!priv->cfg->use_isr_legacy) | |
477 | rb_timeout = RX_RB_TIMEOUT; | |
478 | ||
479 | if (priv->cfg->mod_params->amsdu_size_8K) | |
480 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | |
481 | else | |
482 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | |
483 | ||
484 | /* Stop Rx DMA */ | |
485 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | |
486 | ||
487 | /* Reset driver's Rx queue write index */ | |
488 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | |
489 | ||
490 | /* Tell device where to find RBD circular buffer in DRAM */ | |
491 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | |
d5b25c90 | 492 | (u32)(rxq->bd_dma >> 8)); |
74bcdb33 WYG |
493 | |
494 | /* Tell device where in DRAM to update its Rx status */ | |
495 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | |
496 | rxq->rb_stts_dma >> 4); | |
497 | ||
498 | /* Enable Rx DMA | |
499 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | |
500 | * the credit mechanism in 5000 HW RX FIFO | |
501 | * Direct rx interrupts to hosts | |
502 | * Rx buffer size 4 or 8k | |
503 | * RB timeout 0x10 | |
504 | * 256 RBDs | |
505 | */ | |
506 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | |
507 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | |
508 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | |
509 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | |
510 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | |
511 | rb_size| | |
512 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | |
513 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | |
514 | ||
515 | /* Set interrupt coalescing timer to default (2048 usecs) */ | |
516 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | |
517 | ||
518 | return 0; | |
519 | } | |
520 | ||
521 | int iwlagn_hw_nic_init(struct iwl_priv *priv) | |
522 | { | |
523 | unsigned long flags; | |
524 | struct iwl_rx_queue *rxq = &priv->rxq; | |
525 | int ret; | |
526 | ||
527 | /* nic_init */ | |
528 | spin_lock_irqsave(&priv->lock, flags); | |
529 | priv->cfg->ops->lib->apm_ops.init(priv); | |
530 | ||
531 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | |
532 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | |
533 | ||
534 | spin_unlock_irqrestore(&priv->lock, flags); | |
535 | ||
536 | ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); | |
537 | ||
538 | priv->cfg->ops->lib->apm_ops.config(priv); | |
539 | ||
540 | /* Allocate the RX queue, or reset if it is already allocated */ | |
541 | if (!rxq->bd) { | |
542 | ret = iwl_rx_queue_alloc(priv); | |
543 | if (ret) { | |
544 | IWL_ERR(priv, "Unable to initialize Rx queue\n"); | |
545 | return -ENOMEM; | |
546 | } | |
547 | } else | |
548 | iwlagn_rx_queue_reset(priv, rxq); | |
549 | ||
54b81550 | 550 | iwlagn_rx_replenish(priv); |
74bcdb33 WYG |
551 | |
552 | iwlagn_rx_init(priv, rxq); | |
553 | ||
554 | spin_lock_irqsave(&priv->lock, flags); | |
555 | ||
556 | rxq->need_update = 1; | |
557 | iwl_rx_queue_update_write_ptr(priv, rxq); | |
558 | ||
559 | spin_unlock_irqrestore(&priv->lock, flags); | |
560 | ||
470058e0 ZY |
561 | /* Allocate or reset and init all Tx and Command queues */ |
562 | if (!priv->txq) { | |
563 | ret = iwlagn_txq_ctx_alloc(priv); | |
564 | if (ret) | |
565 | return ret; | |
566 | } else | |
567 | iwlagn_txq_ctx_reset(priv); | |
74bcdb33 WYG |
568 | |
569 | set_bit(STATUS_INIT, &priv->status); | |
570 | ||
571 | return 0; | |
572 | } | |
54b81550 WYG |
573 | |
574 | /** | |
575 | * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | |
576 | */ | |
577 | static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, | |
578 | dma_addr_t dma_addr) | |
579 | { | |
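/* Added illustration: a 256-byte-aligned DMA address of 0x1F4A5600 is
 * stored in the RBD as 0x001F4A56, i.e. the address shifted right by 8. */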
580 | return cpu_to_le32((u32)(dma_addr >> 8)); | |
581 | } | |
582 | ||
583 | /** | |
584 | * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool | |
585 | * | |
586 | * If there are slots in the RX queue that need to be restocked, | |
587 | * and we have free pre-allocated buffers, fill the ranks as much | |
588 | * as we can, pulling from rx_free. | |
589 | * | |
590 | * This moves the 'write' index forward to catch up with 'processed', and | |
591 | * also updates the memory address in the firmware to reference the new | |
592 | * target buffer. | |
593 | */ | |
594 | void iwlagn_rx_queue_restock(struct iwl_priv *priv) | |
595 | { | |
596 | struct iwl_rx_queue *rxq = &priv->rxq; | |
597 | struct list_head *element; | |
598 | struct iwl_rx_mem_buffer *rxb; | |
599 | unsigned long flags; | |
54b81550 WYG |
600 | |
601 | spin_lock_irqsave(&rxq->lock, flags); | |
54b81550 | 602 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { |
6aac74b4 ZY |
603 | /* The overwritten rxb must be a used one */ |
604 | rxb = rxq->queue[rxq->write]; | |
605 | BUG_ON(rxb && rxb->page); | |
606 | ||
54b81550 WYG |
607 | /* Get next free Rx buffer, remove from free list */ |
608 | element = rxq->rx_free.next; | |
609 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | |
610 | list_del(element); | |
611 | ||
612 | /* Point to Rx buffer via next RBD in circular buffer */ | |
613 | rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, | |
614 | rxb->page_dma); | |
615 | rxq->queue[rxq->write] = rxb; | |
616 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | |
617 | rxq->free_count--; | |
618 | } | |
619 | spin_unlock_irqrestore(&rxq->lock, flags); | |
620 | /* If the pre-allocated buffer pool is dropping low, schedule to | |
621 | * refill it */ | |
622 | if (rxq->free_count <= RX_LOW_WATERMARK) | |
623 | queue_work(priv->workqueue, &priv->rx_replenish); | |
624 | ||
625 | ||
626 | /* If we've added more space for the firmware to place data, tell it. | |
627 | * Increment device's write pointer in multiples of 8. */ | |
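/* Added example: with write_actual = 8, a write index of 13 (13 & ~0x7 = 8)
 * triggers no update here; once write reaches 16 the device pointer is
 * advanced again. */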
628 | if (rxq->write_actual != (rxq->write & ~0x7)) { | |
629 | spin_lock_irqsave(&rxq->lock, flags); | |
630 | rxq->need_update = 1; | |
631 | spin_unlock_irqrestore(&rxq->lock, flags); | |
632 | iwl_rx_queue_update_write_ptr(priv, rxq); | |
633 | } | |
634 | } | |
635 | ||
636 | /** |
637 |  * iwlagn_rx_allocate - move buffers from rx_used to rx_free |
638 |  * |
639 |  * A fresh receive page is allocated and mapped for each slot moved to rx_free. |
640 |  * |
641 |  * The Rx queue is then restocked via iwlagn_rx_queue_restock (see |
642 |  * iwlagn_rx_replenish); that runs as a scheduled work item except during init. |
643 |  */ |
644 | void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) | |
645 | { | |
646 | struct iwl_rx_queue *rxq = &priv->rxq; | |
647 | struct list_head *element; | |
648 | struct iwl_rx_mem_buffer *rxb; | |
649 | struct page *page; | |
650 | unsigned long flags; | |
651 | gfp_t gfp_mask = priority; | |
652 | ||
653 | while (1) { | |
654 | spin_lock_irqsave(&rxq->lock, flags); | |
655 | if (list_empty(&rxq->rx_used)) { | |
656 | spin_unlock_irqrestore(&rxq->lock, flags); | |
657 | return; | |
658 | } | |
659 | spin_unlock_irqrestore(&rxq->lock, flags); | |
660 | ||
661 | if (rxq->free_count > RX_LOW_WATERMARK) | |
662 | gfp_mask |= __GFP_NOWARN; | |
663 | ||
664 | if (priv->hw_params.rx_page_order > 0) | |
665 | gfp_mask |= __GFP_COMP; | |
666 | ||
667 | /* Alloc a new receive buffer */ | |
668 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | |
669 | if (!page) { | |
670 | if (net_ratelimit()) | |
671 | IWL_DEBUG_INFO(priv, "alloc_pages failed, " | |
672 | "order: %d\n", | |
673 | priv->hw_params.rx_page_order); | |
674 | ||
675 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | |
676 | net_ratelimit()) | |
677 | IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", | |
678 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | |
679 | rxq->free_count); | |
680 | /* We don't reschedule replenish work here -- we will | |
681 | * call the restock method and if it still needs | |
682 | * more buffers it will schedule replenish */ | |
683 | return; | |
684 | } | |
685 | ||
686 | spin_lock_irqsave(&rxq->lock, flags); | |
687 | ||
688 | if (list_empty(&rxq->rx_used)) { | |
689 | spin_unlock_irqrestore(&rxq->lock, flags); | |
690 | __free_pages(page, priv->hw_params.rx_page_order); | |
691 | return; | |
692 | } | |
693 | element = rxq->rx_used.next; | |
694 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | |
695 | list_del(element); | |
696 | ||
697 | spin_unlock_irqrestore(&rxq->lock, flags); | |
698 | ||
6aac74b4 | 699 | BUG_ON(rxb->page); |
54b81550 WYG |
700 | rxb->page = page; |
701 | /* Get physical address of the RB */ | |
702 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | |
703 | PAGE_SIZE << priv->hw_params.rx_page_order, | |
704 | PCI_DMA_FROMDEVICE); | |
705 | /* dma address must be no more than 36 bits */ | |
706 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | |
707 | /* and also 256 byte aligned! */ | |
708 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | |
709 | ||
710 | spin_lock_irqsave(&rxq->lock, flags); | |
711 | ||
712 | list_add_tail(&rxb->list, &rxq->rx_free); | |
713 | rxq->free_count++; | |
714 | priv->alloc_rxb_page++; | |
715 | ||
716 | spin_unlock_irqrestore(&rxq->lock, flags); | |
717 | } | |
718 | } | |
719 | ||
720 | void iwlagn_rx_replenish(struct iwl_priv *priv) | |
721 | { | |
722 | unsigned long flags; | |
723 | ||
724 | iwlagn_rx_allocate(priv, GFP_KERNEL); | |
725 | ||
726 | spin_lock_irqsave(&priv->lock, flags); | |
727 | iwlagn_rx_queue_restock(priv); | |
728 | spin_unlock_irqrestore(&priv->lock, flags); | |
729 | } | |
730 | ||
731 | void iwlagn_rx_replenish_now(struct iwl_priv *priv) | |
732 | { | |
733 | iwlagn_rx_allocate(priv, GFP_ATOMIC); | |
734 | ||
735 | iwlagn_rx_queue_restock(priv); | |
736 | } | |
737 | ||
738 | /* Assumes that the page field of the buffers in 'pool' is kept accurate. |
739 |  * If a page has been detached, the pool entry must have its page set to NULL. |
740 |  * This free routine walks the list of pool entries and, if the page is |
741 |  * non-NULL, it is unmapped and freed. |
742 |  */ |
743 | void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | |
744 | { | |
745 | int i; | |
746 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | |
747 | if (rxq->pool[i].page != NULL) { | |
748 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | |
749 | PAGE_SIZE << priv->hw_params.rx_page_order, | |
750 | PCI_DMA_FROMDEVICE); | |
751 | __iwl_free_pages(priv, rxq->pool[i].page); | |
752 | rxq->pool[i].page = NULL; | |
753 | } | |
754 | } | |
755 | ||
756 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | |
d5b25c90 | 757 | rxq->bd_dma); |
54b81550 WYG |
758 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), |
759 | rxq->rb_stts, rxq->rb_stts_dma); | |
760 | rxq->bd = NULL; | |
761 | rxq->rb_stts = NULL; | |
762 | } | |
763 | ||
764 | int iwlagn_rxq_stop(struct iwl_priv *priv) | |
765 | { | |
766 | ||
767 | /* stop Rx DMA */ | |
768 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | |
769 | iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, | |
770 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | |
771 | ||
772 | return 0; | |
773 | } | |
8d801080 WYG |
774 | |
775 | int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) | |
776 | { | |
777 | int idx = 0; | |
778 | int band_offset = 0; | |
779 | ||
780 | /* HT rate format: mac80211 wants an MCS number, which is just LSB */ | |
781 | if (rate_n_flags & RATE_MCS_HT_MSK) { | |
782 | idx = (rate_n_flags & 0xff); | |
783 | return idx; | |
784 | /* Legacy rate format, search for match in table */ | |
785 | } else { | |
786 | if (band == IEEE80211_BAND_5GHZ) | |
787 | band_offset = IWL_FIRST_OFDM_RATE; | |
788 | for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) | |
789 | if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) | |
790 | return idx - band_offset; | |
791 | } | |
792 | ||
793 | return -1; | |
794 | } | |
795 | ||
796 | /* Calc max signal level (dBm) among 3 possible receivers */ | |
797 | static inline int iwlagn_calc_rssi(struct iwl_priv *priv, | |
798 | struct iwl_rx_phy_res *rx_resp) | |
799 | { | |
800 | return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); | |
801 | } | |
802 | ||
8d801080 WYG |
803 | static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) |
804 | { | |
805 | u32 decrypt_out = 0; | |
806 | ||
807 | if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == | |
808 | RX_RES_STATUS_STATION_FOUND) | |
809 | decrypt_out |= (RX_RES_STATUS_STATION_FOUND | | |
810 | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); | |
811 | ||
812 | decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); | |
813 | ||
814 | /* packet was not encrypted */ | |
815 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | |
816 | RX_RES_STATUS_SEC_TYPE_NONE) | |
817 | return decrypt_out; | |
818 | ||
819 | /* packet was encrypted with unknown alg */ | |
820 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | |
821 | RX_RES_STATUS_SEC_TYPE_ERR) | |
822 | return decrypt_out; | |
823 | ||
824 | /* decryption was not done in HW */ | |
825 | if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != | |
826 | RX_MPDU_RES_STATUS_DEC_DONE_MSK) | |
827 | return decrypt_out; | |
828 | ||
829 | switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { | |
830 | ||
831 | case RX_RES_STATUS_SEC_TYPE_CCMP: | |
832 | /* alg is CCM: check MIC only */ | |
833 | if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) | |
834 | /* Bad MIC */ | |
835 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | |
836 | else | |
837 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | |
838 | ||
839 | break; | |
840 | ||
841 | case RX_RES_STATUS_SEC_TYPE_TKIP: | |
842 | if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { | |
843 | /* Bad TTAK */ | |
844 | decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; | |
845 | break; | |
846 | } | |
847 | /* fall through if TTAK OK */ | |
848 | default: | |
849 | if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) | |
850 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | |
851 | else | |
852 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | |
853 | break; | |
ee289b64 | 854 | } |
8d801080 WYG |
855 | |
856 | IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", | |
857 | decrypt_in, decrypt_out); | |
858 | ||
859 | return decrypt_out; | |
860 | } | |
861 | ||
862 | static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, | |
863 | struct ieee80211_hdr *hdr, | |
864 | u16 len, | |
865 | u32 ampdu_status, | |
866 | struct iwl_rx_mem_buffer *rxb, | |
867 | struct ieee80211_rx_status *stats) | |
868 | { | |
869 | struct sk_buff *skb; | |
8d801080 WYG |
870 | __le16 fc = hdr->frame_control; |
871 | ||
872 | /* We only process data packets if the interface is open */ | |
873 | if (unlikely(!priv->is_open)) { | |
874 | IWL_DEBUG_DROP_LIMIT(priv, | |
875 | "Dropping packet while interface is not open.\n"); | |
876 | return; | |
877 | } | |
878 | ||
879 | /* In case of HW accelerated crypto and bad decryption, drop */ | |
880 | if (!priv->cfg->mod_params->sw_crypto && | |
881 | iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) | |
882 | return; | |
883 | ||
ecdf94b8 | 884 | skb = dev_alloc_skb(128); |
8d801080 | 885 | if (!skb) { |
ecdf94b8 | 886 | IWL_ERR(priv, "dev_alloc_skb failed\n"); |
8d801080 WYG |
887 | return; |
888 | } | |
889 | ||
8d801080 WYG |
890 | skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); |
891 | ||
8d801080 WYG |
892 | iwl_update_stats(priv, false, fc, len); |
893 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); | |
894 | ||
895 | ieee80211_rx(priv->hw, skb); | |
8d801080 WYG |
896 | priv->alloc_rxb_page--; |
897 | rxb->page = NULL; | |
898 | } | |
899 | ||
900 | /* Called for REPLY_RX (legacy ABG frames), or | |
901 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | |
902 | void iwlagn_rx_reply_rx(struct iwl_priv *priv, | |
903 | struct iwl_rx_mem_buffer *rxb) | |
904 | { | |
905 | struct ieee80211_hdr *header; | |
906 | struct ieee80211_rx_status rx_status; | |
907 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | |
908 | struct iwl_rx_phy_res *phy_res; | |
909 | __le32 rx_pkt_status; | |
2fb291ee | 910 | struct iwl_rx_mpdu_res_start *amsdu; |
8d801080 WYG |
911 | u32 len; |
912 | u32 ampdu_status; | |
913 | u32 rate_n_flags; | |
914 | ||
915 | /** | |
916 | * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. | |
917 | * REPLY_RX: physical layer info is in this buffer | |
918 | * REPLY_RX_MPDU_CMD: physical layer info was sent in separate | |
919 | * command and cached in priv->last_phy_res | |
920 | * | |
921 | * Here we set up local variables depending on which command is | |
922 | * received. | |
923 | */ | |
924 | if (pkt->hdr.cmd == REPLY_RX) { | |
925 | phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; | |
926 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) | |
927 | + phy_res->cfg_phy_cnt); | |
928 | ||
929 | len = le16_to_cpu(phy_res->byte_count); | |
930 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + | |
931 | phy_res->cfg_phy_cnt + len); | |
932 | ampdu_status = le32_to_cpu(rx_pkt_status); | |
933 | } else { | |
05d57520 | 934 | if (!priv->_agn.last_phy_res_valid) { |
8d801080 WYG |
935 | IWL_ERR(priv, "MPDU frame without cached PHY data\n"); |
936 | return; | |
937 | } | |
05d57520 | 938 | phy_res = &priv->_agn.last_phy_res; |
2fb291ee | 939 | amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; |
8d801080 WYG |
940 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); |
941 | len = le16_to_cpu(amsdu->byte_count); | |
942 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); | |
943 | ampdu_status = iwlagn_translate_rx_status(priv, | |
944 | le32_to_cpu(rx_pkt_status)); | |
945 | } | |
946 | ||
947 | if (unlikely(phy_res->cfg_phy_cnt > 20)) { |
948 | IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", |
949 | phy_res->cfg_phy_cnt); | |
950 | return; | |
951 | } | |
952 | ||
953 | if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || | |
954 | !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { | |
955 | IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", | |
956 | le32_to_cpu(rx_pkt_status)); | |
957 | return; | |
958 | } | |
959 | ||
960 | /* This will be used in several places later */ | |
961 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | |
962 | ||
963 | /* rx_status carries information about the packet to mac80211 */ | |
964 | rx_status.mactime = le64_to_cpu(phy_res->timestamp); | |
965 | rx_status.freq = | |
966 | ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); | |
967 | rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? | |
968 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | |
969 | rx_status.rate_idx = | |
970 | iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); | |
971 | rx_status.flag = 0; | |
972 | ||
973 | /* TSF isn't reliable. To keep the user experience smooth, this |
974 |  * workaround does not propagate it to mac80211 */ |
975 | /*rx_status.flag |= RX_FLAG_TSFT;*/ | |
976 | ||
977 | priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); | |
978 | ||
979 | /* Find max signal strength (dBm) among 3 antenna/receiver chains */ | |
980 | rx_status.signal = iwlagn_calc_rssi(priv, phy_res); | |
981 | ||
8d801080 | 982 | iwl_dbg_log_rx_data_frame(priv, len, header); |
ed1b6e99 JB |
983 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", |
984 | rx_status.signal, (unsigned long long)rx_status.mactime); | |
8d801080 WYG |
985 | |
986 | /* | |
987 | * "antenna number" | |
988 | * | |
989 | * It seems that the antenna field in the phy flags value | |
990 | * is actually a bit field. This is undefined by radiotap, | |
991 | * it wants an actual antenna number but I always get "7" | |
992 | * for most legacy frames I receive indicating that the | |
993 | * same frame was received on all three RX chains. | |
994 | * | |
995 | * I think this field should be removed in favor of a | |
996 | * new 802.11n radiotap field "RX chains" that is defined | |
997 | * as a bitmask. | |
998 | */ | |
999 | rx_status.antenna = | |
1000 | (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) | |
1001 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; | |
1002 | ||
1003 | /* set the preamble flag if appropriate */ | |
1004 | if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | |
1005 | rx_status.flag |= RX_FLAG_SHORTPRE; | |
1006 | ||
1007 | /* Set up the HT phy flags */ | |
1008 | if (rate_n_flags & RATE_MCS_HT_MSK) | |
1009 | rx_status.flag |= RX_FLAG_HT; | |
1010 | if (rate_n_flags & RATE_MCS_HT40_MSK) | |
1011 | rx_status.flag |= RX_FLAG_40MHZ; | |
1012 | if (rate_n_flags & RATE_MCS_SGI_MSK) | |
1013 | rx_status.flag |= RX_FLAG_SHORT_GI; | |
1014 | ||
1015 | iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, | |
1016 | rxb, &rx_status); | |
1017 | } | |
1018 | ||
1019 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | |
1020 | * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | |
1021 | void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, | |
05d57520 | 1022 | struct iwl_rx_mem_buffer *rxb) |
8d801080 WYG |
1023 | { |
1024 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | |
05d57520 JB |
1025 | priv->_agn.last_phy_res_valid = true; |
1026 | memcpy(&priv->_agn.last_phy_res, pkt->u.raw, | |
8d801080 WYG |
1027 | sizeof(struct iwl_rx_phy_res)); |
1028 | } | |
b6e4c55a JB |
1029 | |
1030 | static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, | |
1dda6d28 JB |
1031 | struct ieee80211_vif *vif, |
1032 | enum ieee80211_band band, | |
1033 | struct iwl_scan_channel *scan_ch) | |
b6e4c55a JB |
1034 | { |
1035 | const struct ieee80211_supported_band *sband; | |
b6e4c55a JB |
1036 | u16 passive_dwell = 0; |
1037 | u16 active_dwell = 0; | |
14023641 | 1038 | int added = 0; |
b6e4c55a JB |
1039 | u16 channel = 0; |
1040 | ||
1041 | sband = iwl_get_hw_mode(priv, band); | |
1042 | if (!sband) { | |
1043 | IWL_ERR(priv, "invalid band\n"); | |
1044 | return added; | |
1045 | } | |
1046 | ||
1047 | active_dwell = iwl_get_active_dwell_time(priv, band, 0); | |
1dda6d28 | 1048 | passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); |
b6e4c55a JB |
1049 | |
1050 | if (passive_dwell <= active_dwell) | |
1051 | passive_dwell = active_dwell + 1; | |
1052 | ||
14023641 | 1053 | channel = iwl_get_single_channel_number(priv, band); |
b6e4c55a JB |
1054 | if (channel) { |
1055 | scan_ch->channel = cpu_to_le16(channel); | |
1056 | scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; | |
1057 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | |
1058 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | |
1059 | /* Set txpower levels to defaults */ | |
1060 | scan_ch->dsp_atten = 110; | |
1061 | if (band == IEEE80211_BAND_5GHZ) | |
1062 | scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; | |
1063 | else | |
1064 | scan_ch->tx_gain = ((1 << 5) | (5 << 3)); | |
1065 | added++; | |
1066 | } else | |
1067 | IWL_ERR(priv, "no valid channel found\n"); | |
1068 | return added; | |
1069 | } | |
1070 | ||
1071 | static int iwl_get_channels_for_scan(struct iwl_priv *priv, | |
1dda6d28 | 1072 | struct ieee80211_vif *vif, |
b6e4c55a JB |
1073 | enum ieee80211_band band, |
1074 | u8 is_active, u8 n_probes, | |
1075 | struct iwl_scan_channel *scan_ch) | |
1076 | { | |
1077 | struct ieee80211_channel *chan; | |
1078 | const struct ieee80211_supported_band *sband; | |
1079 | const struct iwl_channel_info *ch_info; | |
1080 | u16 passive_dwell = 0; | |
1081 | u16 active_dwell = 0; | |
1082 | int added, i; | |
1083 | u16 channel; | |
1084 | ||
1085 | sband = iwl_get_hw_mode(priv, band); | |
1086 | if (!sband) | |
1087 | return 0; | |
1088 | ||
1089 | active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); | |
1dda6d28 | 1090 | passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); |
b6e4c55a JB |
1091 | |
1092 | if (passive_dwell <= active_dwell) | |
1093 | passive_dwell = active_dwell + 1; | |
1094 | ||
1095 | for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { | |
1096 | chan = priv->scan_request->channels[i]; | |
1097 | ||
1098 | if (chan->band != band) | |
1099 | continue; | |
1100 | ||
81e95430 | 1101 | channel = chan->hw_value; |
b6e4c55a JB |
1102 | scan_ch->channel = cpu_to_le16(channel); |
1103 | ||
1104 | ch_info = iwl_get_channel_info(priv, band, channel); | |
1105 | if (!is_channel_valid(ch_info)) { | |
1106 | IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", | |
1107 | channel); | |
1108 | continue; | |
1109 | } | |
1110 | ||
1111 | if (!is_active || is_channel_passive(ch_info) || | |
1112 | (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) | |
1113 | scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; | |
1114 | else | |
1115 | scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; | |
1116 | ||
1117 | if (n_probes) | |
1118 | scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); | |
1119 | ||
1120 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | |
1121 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | |
1122 | ||
1123 | /* Set txpower levels to defaults */ | |
1124 | scan_ch->dsp_atten = 110; | |
1125 | ||
1126 | /* NOTE: if we were doing 6Mb OFDM for scans we'd use | |
1127 | * power level: | |
1128 | * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; | |
1129 | */ | |
1130 | if (band == IEEE80211_BAND_5GHZ) | |
1131 | scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; | |
1132 | else | |
1133 | scan_ch->tx_gain = ((1 << 5) | (5 << 3)); | |
1134 | ||
1135 | IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", | |
1136 | channel, le32_to_cpu(scan_ch->type), | |
1137 | (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? | |
1138 | "ACTIVE" : "PASSIVE", | |
1139 | (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? | |
1140 | active_dwell : passive_dwell); | |
1141 | ||
1142 | scan_ch++; | |
1143 | added++; | |
1144 | } | |
1145 | ||
1146 | IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); | |
1147 | return added; | |
1148 | } | |
1149 | ||
1dda6d28 | 1150 | void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) |
b6e4c55a JB |
1151 | { |
1152 | struct iwl_host_cmd cmd = { | |
1153 | .id = REPLY_SCAN_CMD, | |
1154 | .len = sizeof(struct iwl_scan_cmd), | |
1155 | .flags = CMD_SIZE_HUGE, | |
1156 | }; | |
1157 | struct iwl_scan_cmd *scan; | |
1158 | struct ieee80211_conf *conf = NULL; | |
1159 | u32 rate_flags = 0; | |
1160 | u16 cmd_len; | |
1161 | u16 rx_chain = 0; | |
1162 | enum ieee80211_band band; | |
1163 | u8 n_probes = 0; | |
1164 | u8 rx_ant = priv->hw_params.valid_rx_ant; | |
1165 | u8 rate; | |
1166 | bool is_active = false; | |
1167 | int chan_mod; | |
1168 | u8 active_chains; | |
0e1654fa | 1169 | u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; |
b6e4c55a JB |
1170 | |
1171 | conf = ieee80211_get_hw_conf(priv->hw); | |
1172 | ||
1173 | cancel_delayed_work(&priv->scan_check); | |
1174 | ||
1175 | if (!iwl_is_ready(priv)) { | |
1176 | IWL_WARN(priv, "request scan called when driver not ready.\n"); | |
1177 | goto done; | |
1178 | } | |
1179 | ||
1180 | /* Make sure the scan wasn't canceled before this queued work | |
1181 | * was given the chance to run... */ | |
1182 | if (!test_bit(STATUS_SCANNING, &priv->status)) | |
1183 | goto done; | |
1184 | ||
1185 | /* This should never be called or scheduled if there is currently | |
1186 | * a scan active in the hardware. */ | |
1187 | if (test_bit(STATUS_SCAN_HW, &priv->status)) { | |
1188 | IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. " | |
1189 | "Ignoring second request.\n"); | |
1190 | goto done; | |
1191 | } | |
1192 | ||
1193 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | |
1194 | IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n"); | |
1195 | goto done; | |
1196 | } | |
1197 | ||
1198 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | |
1199 | IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n"); | |
1200 | goto done; | |
1201 | } | |
1202 | ||
1203 | if (iwl_is_rfkill(priv)) { | |
1204 | IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n"); | |
1205 | goto done; | |
1206 | } | |
1207 | ||
1208 | if (!test_bit(STATUS_READY, &priv->status)) { | |
1209 | IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n"); | |
1210 | goto done; | |
1211 | } | |
1212 | ||
1213 | if (!priv->scan_cmd) { | |
1214 | priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + | |
1215 | IWL_MAX_SCAN_SIZE, GFP_KERNEL); | |
1216 | if (!priv->scan_cmd) { | |
1217 | IWL_DEBUG_SCAN(priv, | |
1218 | "fail to allocate memory for scan\n"); | |
1219 | goto done; | |
1220 | } | |
1221 | } | |
1222 | scan = priv->scan_cmd; | |
1223 | memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); | |
1224 | ||
1225 | scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; | |
1226 | scan->quiet_time = IWL_ACTIVE_QUIET_TIME; | |
1227 | ||
1228 | if (iwl_is_associated(priv)) { | |
1229 | u16 interval = 0; | |
1230 | u32 extra; | |
1231 | u32 suspend_time = 100; | |
1232 | u32 scan_suspend_time = 100; | |
1233 | unsigned long flags; | |
1234 | ||
1235 | IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); | |
1236 | spin_lock_irqsave(&priv->lock, flags); | |
a6e492b9 JL |
1237 | if (priv->is_internal_short_scan) |
1238 | interval = 0; | |
1239 | else | |
1240 | interval = vif->bss_conf.beacon_int; | |
b6e4c55a JB |
1241 | spin_unlock_irqrestore(&priv->lock, flags); |
1242 | ||
1243 | scan->suspend_time = 0; | |
1244 | scan->max_out_time = cpu_to_le32(200 * 1024); | |
1245 | if (!interval) | |
1246 | interval = suspend_time; | |
1247 | ||
1248 | extra = (suspend_time / interval) << 22; | |
1249 | scan_suspend_time = (extra | | |
1250 | ((suspend_time % interval) * 1024)); | |
1251 | scan->suspend_time = cpu_to_le32(scan_suspend_time); | |
1252 | IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", | |
1253 | scan_suspend_time, interval); | |
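/* Added illustration: the quotient suspend_time / interval lands in the
 * bits above bit 22 and the remainder, scaled by 1024, in the low bits;
 * e.g. suspend_time = 100 and interval = 100 encode as (1 << 22) | 0. */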
1254 | } | |
1255 | ||
1256 | if (priv->is_internal_short_scan) { | |
1257 | IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); | |
1258 | } else if (priv->scan_request->n_ssids) { | |
1259 | int i, p = 0; | |
1260 | IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); | |
1261 | for (i = 0; i < priv->scan_request->n_ssids; i++) { | |
1262 | /* always does wildcard anyway */ | |
1263 | if (!priv->scan_request->ssids[i].ssid_len) | |
1264 | continue; | |
1265 | scan->direct_scan[p].id = WLAN_EID_SSID; | |
1266 | scan->direct_scan[p].len = | |
1267 | priv->scan_request->ssids[i].ssid_len; | |
1268 | memcpy(scan->direct_scan[p].ssid, | |
1269 | priv->scan_request->ssids[i].ssid, | |
1270 | priv->scan_request->ssids[i].ssid_len); | |
1271 | n_probes++; | |
1272 | p++; | |
1273 | } | |
1274 | is_active = true; | |
1275 | } else | |
1276 | IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); | |
1277 | ||
1278 | scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; | |
1279 | scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; | |
1280 | scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | |
1281 | ||
1282 | switch (priv->scan_band) { | |
1283 | case IEEE80211_BAND_2GHZ: | |
1284 | scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; | |
1285 | chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK) | |
1286 | >> RXON_FLG_CHANNEL_MODE_POS; | |
1287 | if (chan_mod == CHANNEL_MODE_PURE_40) { | |
1288 | rate = IWL_RATE_6M_PLCP; | |
1289 | } else { | |
1290 | rate = IWL_RATE_1M_PLCP; | |
1291 | rate_flags = RATE_MCS_CCK_MSK; | |
1292 | } | |
d44ae69e JB |
1293 | /* |
1294 | * Internal scans are passive, so we can indiscriminately set | |
1295 | * the BT ignore flag on 2.4 GHz since it applies to TX only. | |
1296 | */ | |
1297 | if (priv->cfg->advanced_bt_coexist) | |
1298 | scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; | |
ad41ee3a | 1299 | scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED; |
b6e4c55a JB |
1300 | break; |
1301 | case IEEE80211_BAND_5GHZ: | |
1302 | rate = IWL_RATE_6M_PLCP; | |
1303 | /* | |
ad41ee3a RC |
1304 | * If active scanning is requested but a certain channel is |
1305 | * marked passive, we can do active scanning if we detect | |
1306 | * transmissions. | |
1307 | * | |
1308 | * There is an issue with some firmware versions that triggers | |
1309 | * a sysassert on a "good CRC threshold" of zero (== disabled), | |
1310 | * on a radar channel even though this means that we should NOT | |
1311 | * send probes. | |
1312 | * | |
1313 | * The "good CRC threshold" is the number of frames that we | |
1314 | * need to receive during our dwell time on a channel before | |
1315 | * sending out probes -- setting this to a huge value will | |
1316 | * mean we never reach it, but at the same time work around | |
1317 | * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER | |
1318 | * here instead of IWL_GOOD_CRC_TH_DISABLED. | |
b6e4c55a | 1319 | */ |
ad41ee3a RC |
1320 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : |
1321 | IWL_GOOD_CRC_TH_NEVER; | |
b6e4c55a JB |
1322 | break; |
1323 | default: | |
1324 | IWL_WARN(priv, "Invalid scan band count\n"); | |
1325 | goto done; | |
1326 | } | |
1327 | ||
1328 | band = priv->scan_band; | |
1329 | ||
0e1654fa JB |
1330 | if (priv->cfg->scan_rx_antennas[band]) |
1331 | rx_ant = priv->cfg->scan_rx_antennas[band]; | |
e7cb4955 | 1332 | |
0e1654fa JB |
1333 | if (priv->cfg->scan_tx_antennas[band]) |
1334 | scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; | |
1335 | ||
bee008b7 WYG |
1336 | if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { |
1337 | /* operated as 1x1 in full concurrency mode */ | |
1338 | scan_tx_antennas = | |
1339 | first_antenna(priv->cfg->scan_tx_antennas[band]); | |
1340 | } | |
1341 | ||
0e1654fa JB |
1342 | priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], |
1343 | scan_tx_antennas); | |
b6e4c55a JB |
1344 | rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); |
1345 | scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); | |
1346 | ||
1347 | /* In power save mode use one chain, otherwise use all chains */ | |
1348 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | |
1349 | /* rx_ant has been set to all valid chains previously */ | |
1350 | active_chains = rx_ant & | |
1351 | ((u8)(priv->chain_noise_data.active_chains)); | |
1352 | if (!active_chains) | |
1353 | active_chains = rx_ant; | |
1354 | ||
1355 | IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", | |
1356 | priv->chain_noise_data.active_chains); | |
1357 | ||
1358 | rx_ant = first_antenna(active_chains); | |
1359 | } | |
bee008b7 WYG |
1360 | if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { |
1361 | /* operated as 1x1 in full concurrency mode */ | |
1362 | rx_ant = first_antenna(rx_ant); | |
1363 | } | |
1364 | ||
b6e4c55a JB |
1365 | /* MIMO is not used here, but value is required */ |
1366 | rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; | |
1367 | rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; | |
1368 | rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; | |
1369 | rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; | |
1370 | scan->rx_chain = cpu_to_le16(rx_chain); | |
1371 | if (!priv->is_internal_short_scan) { | |
1372 | cmd_len = iwl_fill_probe_req(priv, | |
1373 | (struct ieee80211_mgmt *)scan->data, | |
3a0b9aad | 1374 | vif->addr, |
b6e4c55a JB |
1375 | priv->scan_request->ie, |
1376 | priv->scan_request->ie_len, | |
1377 | IWL_MAX_SCAN_SIZE - sizeof(*scan)); | |
1378 | } else { | |
3a0b9aad | 1379 | /* use bcast addr, will not be transmitted but must be valid */ |
b6e4c55a JB |
1380 | cmd_len = iwl_fill_probe_req(priv, |
1381 | (struct ieee80211_mgmt *)scan->data, | |
3a0b9aad | 1382 | iwl_bcast_addr, NULL, 0, |
b6e4c55a JB |
1383 | IWL_MAX_SCAN_SIZE - sizeof(*scan)); |
1384 | ||
1385 | } | |
1386 | scan->tx_cmd.len = cpu_to_le16(cmd_len); | |
b6e4c55a JB |
1387 | |
1388 | scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | | |
1389 | RXON_FILTER_BCON_AWARE_MSK); | |
1390 | ||
1391 | if (priv->is_internal_short_scan) { | |
1392 | scan->channel_count = | |
1dda6d28 | 1393 | iwl_get_single_channel_for_scan(priv, vif, band, |
b6e4c55a JB |
1394 | (void *)&scan->data[le16_to_cpu( |
1395 | scan->tx_cmd.len)]); | |
1396 | } else { | |
1397 | scan->channel_count = | |
1dda6d28 | 1398 | iwl_get_channels_for_scan(priv, vif, band, |
b6e4c55a JB |
1399 | is_active, n_probes, |
1400 | (void *)&scan->data[le16_to_cpu( | |
1401 | scan->tx_cmd.len)]); | |
1402 | } | |
1403 | if (scan->channel_count == 0) { | |
1404 | IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); | |
1405 | goto done; | |
1406 | } | |
1407 | ||
1408 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + | |
1409 | scan->channel_count * sizeof(struct iwl_scan_channel); | |
1410 | cmd.data = scan; | |
1411 | scan->len = cpu_to_le16(cmd.len); | |
1412 | ||
1413 | set_bit(STATUS_SCAN_HW, &priv->status); | |
1414 | if (iwl_send_cmd_sync(priv, &cmd)) | |
1415 | goto done; | |
1416 | ||
1417 | queue_delayed_work(priv->workqueue, &priv->scan_check, | |
1418 | IWL_SCAN_CHECK_WATCHDOG); | |
1419 | ||
1420 | return; | |
1421 | ||
1422 | done: | |
1423 | /* Cannot perform scan. Make sure we clear the scanning bits |
1424 |  * from status so the next scan request can be performed; if we |
1425 |  * don't clear them here, all subsequent scan requests will |
1426 |  * fail. |
1427 |  */ |
1428 | clear_bit(STATUS_SCAN_HW, &priv->status); | |
1429 | clear_bit(STATUS_SCANNING, &priv->status); | |
1430 | /* inform mac80211 scan aborted */ | |
1431 | queue_work(priv->workqueue, &priv->scan_completed); | |
1432 | } | |
1fa61b2e JB |
1433 | |
1434 | int iwlagn_manage_ibss_station(struct iwl_priv *priv, | |
1435 | struct ieee80211_vif *vif, bool add) | |
1436 | { | |
fd1af15d JB |
1437 | struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; |
1438 | ||
1fa61b2e | 1439 | if (add) |
57f8db89 | 1440 | return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true, |
fd1af15d JB |
1441 | &vif_priv->ibss_bssid_sta_id); |
1442 | return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, | |
1443 | vif->bss_conf.bssid); | |
1fa61b2e | 1444 | } |
1ff504e0 JB |
1445 | |
1446 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | |
1447 | int sta_id, int tid, int freed) | |
1448 | { | |
a24d52f3 | 1449 | lockdep_assert_held(&priv->sta_lock); |
9c5ac091 | 1450 | |
1ff504e0 JB |
1451 | if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) |
1452 | priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; | |
1453 | else { | |
1454 | IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", | |
1455 | priv->stations[sta_id].tid[tid].tfds_in_queue, | |
1456 | freed); | |
1457 | priv->stations[sta_id].tid[tid].tfds_in_queue = 0; | |
1458 | } | |
1459 | } | |
716c74b0 WYG |
1460 | |
1461 | #define IWL_FLUSH_WAIT_MS 2000 | |
1462 | ||
1463 | int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv) | |
1464 | { | |
1465 | struct iwl_tx_queue *txq; | |
1466 | struct iwl_queue *q; | |
1467 | int cnt; | |
1468 | unsigned long now = jiffies; | |
1469 | int ret = 0; | |
1470 | ||
1471 | /* waiting for all the tx frames to complete might take a while */ |
1472 | for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { | |
1473 | if (cnt == IWL_CMD_QUEUE_NUM) | |
1474 | continue; | |
1475 | txq = &priv->txq[cnt]; | |
1476 | q = &txq->q; | |
1477 | while (q->read_ptr != q->write_ptr && !time_after(jiffies, | |
1478 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) | |
1479 | msleep(1); | |
1480 | ||
1481 | if (q->read_ptr != q->write_ptr) { | |
1482 | IWL_ERR(priv, "fail to flush all tx fifo queues\n"); | |
1483 | ret = -ETIMEDOUT; | |
1484 | break; | |
1485 | } | |
1486 | } | |
1487 | return ret; | |
1488 | } | |
1489 | ||
1490 | #define IWL_TX_QUEUE_MSK 0xfffff | |
1491 | ||
1492 | /** | |
1493 |  * iwlagn_txfifo_flush - send REPLY_TXFIFO_FLUSH command to uCode |
1494 | * | |
1495 | * pre-requirements: | |
1496 | * 1. acquire mutex before calling | |
1497 | * 2. make sure rf is on and not in exit state | |
1498 | */ | |
1499 | int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) | |
1500 | { | |
1501 | struct iwl_txfifo_flush_cmd flush_cmd; | |
1502 | struct iwl_host_cmd cmd = { | |
1503 | .id = REPLY_TXFIFO_FLUSH, | |
1504 | .len = sizeof(struct iwl_txfifo_flush_cmd), | |
1505 | .flags = CMD_SYNC, | |
1506 | .data = &flush_cmd, | |
1507 | }; | |
1508 | ||
1509 | might_sleep(); | |
1510 | ||
1511 | memset(&flush_cmd, 0, sizeof(flush_cmd)); | |
1512 | flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK | | |
1513 | IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK; | |
1514 | if (priv->cfg->sku & IWL_SKU_N) | |
1515 | flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; | |
1516 | ||
1517 | IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", | |
1518 | flush_cmd.fifo_control); | |
1519 | flush_cmd.flush_control = cpu_to_le16(flush_control); | |
1520 | ||
1521 | return iwl_send_cmd(priv, &cmd); | |
1522 | } | |
65550636 WYG |
1523 | |
1524 | void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) | |
1525 | { | |
1526 | mutex_lock(&priv->mutex); | |
1527 | ieee80211_stop_queues(priv->hw); | |
1528 | if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) { | |
1529 | IWL_ERR(priv, "flush request fail\n"); | |
1530 | goto done; | |
1531 | } | |
1532 | IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); | |
1533 | iwlagn_wait_tx_queue_empty(priv); | |
1534 | done: | |
1535 | ieee80211_wake_queues(priv->hw); | |
1536 | mutex_unlock(&priv->mutex); | |
1537 | } |