iwlwifi: mvm: remove code that stops multiple UMAC scans of a type
drivers/net/wireless/iwlwifi/mvm/scan.c
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65
66 #include <linux/etherdevice.h>
67 #include <net/mac80211.h>
68
69 #include "mvm.h"
70 #include "fw-api-scan.h"
71
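/* Used as channel_opt[].non_ebs_ratio values when EBS is enabled,
 * see iwl_mvm_scan_lmac() below.
 */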
72 #define IWL_DENSE_EBS_SCAN_RATIO 5
73 #define IWL_SPARSE_EBS_SCAN_RATIO 1
74
75 struct iwl_mvm_scan_params {
76 u32 max_out_time;
77 u32 suspend_time;
78 bool passive_fragmented;
79 u32 n_channels;
80 u16 delay;
81 int n_ssids;
82 struct cfg80211_ssid *ssids;
83 struct ieee80211_channel **channels;
84 u16 interval; /* interval between scans (in secs) */
85 u32 flags;
86 u8 *mac_addr;
87 u8 *mac_addr_mask;
88 bool no_cck;
89 bool pass_all;
90 int n_match_sets;
91 struct iwl_scan_probe_req preq;
92 struct cfg80211_match_set *match_sets;
93 struct _dwell {
94 u16 passive;
95 u16 active;
96 u16 fragmented;
97 } dwell[IEEE80211_NUM_BANDS];
98 struct {
99 u8 iterations;
100 u8 full_scan_mul; /* not used for UMAC */
101 } schedule[2];
102 };
103
104 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
105 {
106 if (mvm->scan_rx_ant != ANT_NONE)
107 return mvm->scan_rx_ant;
108 return iwl_mvm_get_valid_rx_ant(mvm);
109 }
110
111 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
112 {
113 u16 rx_chain;
114 u8 rx_ant;
115
116 rx_ant = iwl_mvm_scan_rx_ant(mvm);
117 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
118 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
119 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
120 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
121 return cpu_to_le16(rx_chain);
122 }
123
124 static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
125 {
126 if (band == IEEE80211_BAND_2GHZ)
127 return cpu_to_le32(PHY_BAND_24);
128 else
129 return cpu_to_le32(PHY_BAND_5);
130 }
131
132 static inline __le32
133 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
134 bool no_cck)
135 {
136 u32 tx_ant;
137
138 mvm->scan_last_antenna_idx =
139 iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
140 mvm->scan_last_antenna_idx);
141 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
142
143 if (band == IEEE80211_BAND_2GHZ && !no_cck)
144 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
145 tx_ant);
146 else
147 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
148 }
149
150 /*
151 * If req->n_ssids > 0, it means we should do an active scan.
152 * In case of active scan w/o directed scan, we receive a zero-length SSID
153 * just to notify that this scan is active and not passive.
154 * In order to notify the FW of the number of SSIDs we wish to scan (including
155 * the zero-length one), we need to set the corresponding bits in chan->type,
156 * one for each SSID, and set the active bit (first). The first SSID is
157 * already included in the probe template, so we need to set only
158 * req->n_ssids - 1 bits in addition to the first bit.
159 */
160 static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
161 enum ieee80211_band band, int n_ssids)
162 {
163 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
164 return 10;
165 if (band == IEEE80211_BAND_2GHZ)
166 return 20 + 3 * (n_ssids + 1);
167 return 10 + 2 * (n_ssids + 1);
168 }
169
170 static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
171 enum ieee80211_band band)
172 {
173 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
174 return 110;
175 return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
176 }
177
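/* Count the interfaces (other than P2P Device) that are bound to a PHY context */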
178 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
179 struct ieee80211_vif *vif)
180 {
181 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
182 int *global_cnt = data;
183
184 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
185 mvmvif->phy_ctxt->id < MAX_PHYS)
186 *global_cnt += 1;
187 }
188
189 static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
190 struct ieee80211_vif *vif,
191 struct iwl_mvm_scan_params *params)
192 {
193 int global_cnt = 0;
194 enum ieee80211_band band;
195 u8 frag_passive_dwell = 0;
196
197 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
198 IEEE80211_IFACE_ITER_NORMAL,
199 iwl_mvm_scan_condition_iterator,
200 &global_cnt);
201 if (!global_cnt)
202 goto not_bound;
203
204 params->suspend_time = 30;
205 params->max_out_time = 120;
206
207 if (iwl_mvm_low_latency(mvm)) {
208 if (mvm->fw->ucode_capa.api[0] &
209 IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
210 params->suspend_time = 105;
211 /*
212 * If there is more than one active interface, make the
213 * passive scan more fragmented.
214 */
215 frag_passive_dwell = 40;
216 params->max_out_time = frag_passive_dwell;
217 } else {
218 params->suspend_time = 120;
219 params->max_out_time = 120;
220 }
221 }
222
223 if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
224 IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
225 /*
226 * P2P device scan should not be fragmented to avoid negative
227 * impact on P2P device discovery. Configure max_out_time to be
228 * equal to the dwell time on a passive channel. Take the longest
229 * possible value, the one that corresponds to the 2.4 GHz band.
230 */
231 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
232 u32 passive_dwell =
233 iwl_mvm_get_passive_dwell(mvm,
234 IEEE80211_BAND_2GHZ);
235 params->max_out_time = passive_dwell;
236 } else {
237 params->passive_fragmented = true;
238 }
239 }
240
241 if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
242 (params->max_out_time > 200))
243 params->max_out_time = 200;
244
245 not_bound:
246
247 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
248 if (params->passive_fragmented)
249 params->dwell[band].fragmented = frag_passive_dwell;
250
251 params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
252 band);
253 params->dwell[band].active =
254 iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
255 }
256
257 IWL_DEBUG_SCAN(mvm,
258 "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
259 params->max_out_time, params->suspend_time,
260 params->passive_fragmented);
261 IWL_DEBUG_SCAN(mvm,
262 "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
263 params->dwell[IEEE80211_BAND_2GHZ].passive,
264 params->dwell[IEEE80211_BAND_2GHZ].active,
265 params->dwell[IEEE80211_BAND_2GHZ].fragmented);
266 IWL_DEBUG_SCAN(mvm,
267 "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
268 params->dwell[IEEE80211_BAND_5GHZ].passive,
269 params->dwell[IEEE80211_BAND_5GHZ].active,
270 params->dwell[IEEE80211_BAND_5GHZ].fragmented);
271 }
272
273 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
274 {
275 /* require rrm scan whenever the fw supports it */
276 return mvm->fw->ucode_capa.capa[0] &
277 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
278 }
279
280 static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
281 {
282 int max_probe_len;
283
284 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
285
286 /* we create the 802.11 header and SSID element */
287 max_probe_len -= 24 + 2;
288
289 /* DS parameter set element is added on 2.4GHZ band if required */
290 if (iwl_mvm_rrm_scan_needed(mvm))
291 max_probe_len -= 3;
292
293 return max_probe_len;
294 }
295
296 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
297 {
298 int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
299
300 /* TODO: [BUG] This function should return the maximum allowed size of
301 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
302 * in the same command. So the correct implementation of this function
303 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
304 * command has only 512 bytes and it would leave us with about 240
305 * bytes for scan IEs, which is clearly not enough. So meanwhile
306 * we will report an incorrect value. This may result in a failure to
307 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
308 * functions with -ENOBUFS, if a large enough probe will be provided.
309 */
310 return max_ie_len;
311 }
312
313 static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
314 int num_res, u8 *buf, size_t buf_size)
315 {
316 int i;
317 u8 *pos = buf, *end = buf + buf_size;
318
319 for (i = 0; pos < end && i < num_res; i++)
320 pos += snprintf(pos, end - pos, " %u", res[i].channel);
321
322 /* terminate the string in case the buffer was too short */
323 *(buf + buf_size - 1) = '\0';
324
325 return buf;
326 }
327
328 int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
329 struct iwl_rx_cmd_buffer *rxb,
330 struct iwl_device_cmd *cmd)
331 {
332 struct iwl_rx_packet *pkt = rxb_addr(rxb);
333 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
334 u8 buf[256];
335
336 IWL_DEBUG_SCAN(mvm,
337 "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
338 notif->status, notif->scanned_channels,
339 iwl_mvm_dump_channel_list(notif->results,
340 notif->scanned_channels, buf,
341 sizeof(buf)));
342 return 0;
343 }
344
345 int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
346 struct iwl_rx_cmd_buffer *rxb,
347 struct iwl_device_cmd *cmd)
348 {
349 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
350 ieee80211_sched_scan_results(mvm->hw);
351
352 return 0;
353 }
354
355 int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
356 struct iwl_rx_cmd_buffer *rxb,
357 struct iwl_device_cmd *cmd)
358 {
359 struct iwl_rx_packet *pkt = rxb_addr(rxb);
360 struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
361 bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
362 bool ebs_successful = (scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS);
363
364 /* scan status must be locked for proper checking */
365 lockdep_assert_held(&mvm->mutex);
366
367 /* We first check if we were stopping a scan, in which case we
368 * just clear the stopping flag. Then we check if it was a
369 * firmware initiated stop, in which case we need to inform
370 * mac80211.
371 * Note that we can have a stopping and a running scan
372 * simultaneously, but we can't have two different types of
373 * scans stopping or running at the same time (since LMAC
374 * doesn't support it).
375 */
376
377 if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
378 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
379
380 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
381 aborted ? "aborted" : "completed",
382 ebs_successful ? "successful" : "failed");
383
384 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
385 } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
386 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
387 aborted ? "aborted" : "completed",
388 ebs_successful ? "successful" : "failed");
389
390 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
391 } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
392 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
393
394 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
395 aborted ? "aborted" : "completed",
396 ebs_successful ? "successful" : "failed");
397
398 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
399 ieee80211_sched_scan_stopped(mvm->hw);
400 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
401 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
402 aborted ? "aborted" : "completed",
403 ebs_successful ? "successful" : "failed");
404
405 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
406 ieee80211_scan_completed(mvm->hw,
407 scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
408 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
409 }
410
411 mvm->last_ebs_successful = ebs_successful;
412
413 return 0;
414 }
415
416 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
417 {
418 int i;
419
420 for (i = 0; i < PROBE_OPTION_MAX; i++) {
421 if (!ssid_list[i].len)
422 break;
423 if (ssid_list[i].len == ssid_len &&
424 !memcmp(ssid_list[i].ssid, ssid, ssid_len))
425 return i;
426 }
427 return -1;
428 }
429
430 /* We insert the SSIDs in reverse order, because the FW will
431 * invert it back.
432 */
433 static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
434 struct iwl_ssid_ie *ssids,
435 u32 *ssid_bitmap)
436 {
437 int i, j;
438 int index;
439
440 /*
441 * Copy SSIDs from the match list.
442 * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs to
443 * configure the match list.
444 */
445 for (i = 0, j = params->n_match_sets - 1;
446 j >= 0 && i < PROBE_OPTION_MAX;
447 i++, j--) {
448 /* skip empty SSID matchsets */
449 if (!params->match_sets[j].ssid.ssid_len)
450 continue;
451 ssids[i].id = WLAN_EID_SSID;
452 ssids[i].len = params->match_sets[j].ssid.ssid_len;
453 memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
454 ssids[i].len);
455 }
456
457 /* add SSIDs from scan SSID list */
458 *ssid_bitmap = 0;
459 for (j = params->n_ssids - 1;
460 j >= 0 && i < PROBE_OPTION_MAX;
461 i++, j--) {
462 index = iwl_ssid_exist(params->ssids[j].ssid,
463 params->ssids[j].ssid_len,
464 ssids);
465 if (index < 0) {
466 ssids[i].id = WLAN_EID_SSID;
467 ssids[i].len = params->ssids[j].ssid_len;
468 memcpy(ssids[i].ssid, params->ssids[j].ssid,
469 ssids[i].len);
470 *ssid_bitmap |= BIT(i);
471 } else {
472 *ssid_bitmap |= BIT(index);
473 }
474 }
475 }
476
477 static int
478 iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
479 struct cfg80211_sched_scan_request *req)
480 {
481 struct iwl_scan_offload_profile *profile;
482 struct iwl_scan_offload_profile_cfg *profile_cfg;
483 struct iwl_scan_offload_blacklist *blacklist;
484 struct iwl_host_cmd cmd = {
485 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
486 .len[1] = sizeof(*profile_cfg),
487 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
488 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
489 };
490 int blacklist_len;
491 int i;
492 int ret;
493
494 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
495 return -EIO;
496
497 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
498 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
499 else
500 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
501
502 blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
503 if (!blacklist)
504 return -ENOMEM;
505
506 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
507 if (!profile_cfg) {
508 ret = -ENOMEM;
509 goto free_blacklist;
510 }
511
512 cmd.data[0] = blacklist;
513 cmd.len[0] = sizeof(*blacklist) * blacklist_len;
514 cmd.data[1] = profile_cfg;
515
516 /* No blacklist configuration */
517
518 profile_cfg->num_profiles = req->n_match_sets;
519 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
520 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
521 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
522 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
523 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
524
525 for (i = 0; i < req->n_match_sets; i++) {
526 profile = &profile_cfg->profiles[i];
527 profile->ssid_index = i;
528 /* Support any cipher and auth algorithm */
529 profile->unicast_cipher = 0xff;
530 profile->auth_alg = 0xff;
531 profile->network_type = IWL_NETWORK_TYPE_ANY;
532 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
533 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
534 }
535
536 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
537
538 ret = iwl_mvm_send_cmd(mvm, &cmd);
539 kfree(profile_cfg);
540 free_blacklist:
541 kfree(blacklist);
542
543 return ret;
544 }
545
546 static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
547 struct cfg80211_sched_scan_request *req)
548 {
549 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
550 IWL_DEBUG_SCAN(mvm,
551 "Sending scheduled scan with filtering, n_match_sets %d\n",
552 req->n_match_sets);
553 return false;
554 }
555
556 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
557 return true;
558 }
559
560 static int iwl_mvm_send_lmac_scan_abort(struct iwl_mvm *mvm)
561 {
562 int ret;
563 struct iwl_host_cmd cmd = {
564 .id = SCAN_OFFLOAD_ABORT_CMD,
565 };
566 u32 status;
567
568 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
569 if (ret)
570 return ret;
571
572 if (status != CAN_ABORT_STATUS) {
573 /*
574 * The scan abort will return 1 for success or
575 * 2 for "failure". A failure condition can be
576 * due to simply not being in an active scan which
577 * can occur if we send the scan abort before the
578 * microcode has notified us that a scan is completed.
579 */
580 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
581 ret = -ENOENT;
582 }
583
584 return ret;
585 }
586
587 static int iwl_mvm_lmac_scan_stop(struct iwl_mvm *mvm, int type)
588 {
589 int ret;
590 struct iwl_notification_wait wait_scan_done;
591 static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
592 bool sched = type & IWL_MVM_SCAN_SCHED;
593
594 lockdep_assert_held(&mvm->mutex);
595
596 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
597 scan_done_notif,
598 ARRAY_SIZE(scan_done_notif),
599 NULL, NULL);
600
601 ret = iwl_mvm_send_lmac_scan_abort(mvm);
602 if (ret) {
603 IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
604 sched ? "offloaded " : "", ret);
605 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
606 goto out;
607 }
608
609 IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
610 sched ? "scheduled " : "");
611
612 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
613 out:
614 return ret;
615 }
616
617 static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
618 struct iwl_scan_req_tx_cmd *tx_cmd,
619 bool no_cck)
620 {
621 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
622 TX_CMD_FLG_BT_DIS);
623 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
624 IEEE80211_BAND_2GHZ,
625 no_cck);
626 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
627
628 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
629 TX_CMD_FLG_BT_DIS);
630 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
631 IEEE80211_BAND_5GHZ,
632 no_cck);
633 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
634 }
635
636 static void
637 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
638 struct ieee80211_channel **channels,
639 int n_channels, u32 ssid_bitmap,
640 struct iwl_scan_req_lmac *cmd)
641 {
642 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
643 int i;
644
645 for (i = 0; i < n_channels; i++) {
646 channel_cfg[i].channel_num =
647 cpu_to_le16(channels[i]->hw_value);
648 channel_cfg[i].iter_count = cpu_to_le16(1);
649 channel_cfg[i].iter_interval = 0;
650 channel_cfg[i].flags =
651 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
652 ssid_bitmap);
653 }
654 }
655
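/* Copy the given IEs into pos; when an RRM scan is needed, also insert a
 * DS Parameter Set placeholder element at its proper position in the IE order.
 */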
656 static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
657 size_t len, u8 *const pos)
658 {
659 static const u8 before_ds_params[] = {
660 WLAN_EID_SSID,
661 WLAN_EID_SUPP_RATES,
662 WLAN_EID_REQUEST,
663 WLAN_EID_EXT_SUPP_RATES,
664 };
665 size_t offs;
666 u8 *newpos = pos;
667
668 if (!iwl_mvm_rrm_scan_needed(mvm)) {
669 memcpy(newpos, ies, len);
670 return newpos + len;
671 }
672
673 offs = ieee80211_ie_split(ies, len,
674 before_ds_params,
675 ARRAY_SIZE(before_ds_params),
676 0);
677
678 memcpy(newpos, ies, offs);
679 newpos += offs;
680
681 /* Add a placeholder for DS Parameter Set element */
682 *newpos++ = WLAN_EID_DS_PARAMS;
683 *newpos++ = 1;
684 *newpos++ = 0;
685
686 memcpy(newpos, ies + offs, len - offs);
687 newpos += len - offs;
688
689 return newpos;
690 }
691
692 static void
693 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
694 struct ieee80211_scan_ies *ies,
695 struct iwl_mvm_scan_params *params)
696 {
697 struct ieee80211_mgmt *frame = (void *)params->preq.buf;
698 u8 *pos, *newpos;
699 const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
700 params->mac_addr : NULL;
701
702 /*
703 * Unfortunately, right now the offload scan doesn't support randomising
704 * within the firmware, so until the firmware API is ready we implement
705 * it in the driver. This means that the scan iterations won't really be
706 * random, only when it's restarted, but at least that helps a bit.
707 */
708 if (mac_addr)
709 get_random_mask_addr(frame->sa, mac_addr,
710 params->mac_addr_mask);
711 else
712 memcpy(frame->sa, vif->addr, ETH_ALEN);
713
714 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
715 eth_broadcast_addr(frame->da);
716 eth_broadcast_addr(frame->bssid);
717 frame->seq_ctrl = 0;
718
719 pos = frame->u.probe_req.variable;
720 *pos++ = WLAN_EID_SSID;
721 *pos++ = 0;
722
723 params->preq.mac_header.offset = 0;
724 params->preq.mac_header.len = cpu_to_le16(24 + 2);
725
726 /* Insert ds parameter set element on 2.4 GHz band */
727 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
728 ies->ies[IEEE80211_BAND_2GHZ],
729 ies->len[IEEE80211_BAND_2GHZ],
730 pos);
731 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
732 params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
733 pos = newpos;
734
735 memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
736 ies->len[IEEE80211_BAND_5GHZ]);
737 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
738 params->preq.band_data[1].len =
739 cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
740 pos += ies->len[IEEE80211_BAND_5GHZ];
741
742 memcpy(pos, ies->common_ies, ies->common_ie_len);
743 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
744 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
745 }
746
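/* Map the extended scan priority onto the legacy LOW/MEDIUM/HIGH values when
 * the firmware does not support the extended scan priority API.
 */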
747 static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
748 enum iwl_scan_priority_ext prio)
749 {
750 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)
751 return cpu_to_le32(prio);
752
753 if (prio <= IWL_SCAN_PRIORITY_EXT_2)
754 return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
755
756 if (prio <= IWL_SCAN_PRIORITY_EXT_4)
757 return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
758
759 return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
760 }
761
762 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
763 struct iwl_scan_req_lmac *cmd,
764 struct iwl_mvm_scan_params *params)
765 {
766 cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
767 cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
768 if (params->passive_fragmented)
769 cmd->fragmented_dwell =
770 params->dwell[IEEE80211_BAND_2GHZ].fragmented;
771 cmd->max_out_time = cpu_to_le32(params->max_out_time);
772 cmd->suspend_time = cpu_to_le32(params->suspend_time);
773 cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
774 }
775
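/* Check that the request fits the FW command limits: number of SSIDs,
 * number of channels and room left for the probe request IEs.
 */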
776 static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
777 struct ieee80211_scan_ies *ies,
778 int n_channels)
779 {
780 return ((n_ssids <= PROBE_OPTION_MAX) &&
781 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
782 (ies->common_ie_len +
783 ies->len[NL80211_BAND_2GHZ] +
784 ies->len[NL80211_BAND_5GHZ] <=
785 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
786 }
787
788 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, int n_iterations)
789 {
790 const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
791
792 /* We can only use EBS if:
793 * 1. the feature is supported;
794 * 2. the last EBS was successful;
795 * 3. if only single scan, the single scan EBS API is supported.
796 */
797 return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
798 mvm->last_ebs_successful &&
799 (n_iterations > 1 ||
800 (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)));
801 }
802
803 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
804 {
805 return params->schedule[0].iterations + params->schedule[1].iterations;
806 }
807
808 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
809 struct iwl_mvm_scan_params *params)
810 {
811 int flags = 0;
812
813 if (params->n_ssids == 0)
814 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
815
816 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
817 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
818
819 if (params->passive_fragmented)
820 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
821
822 if (iwl_mvm_rrm_scan_needed(mvm))
823 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
824
825 if (params->pass_all)
826 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
827 else
828 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
829
830 #ifdef CONFIG_IWLWIFI_DEBUGFS
831 if (mvm->scan_iter_notif_enabled)
832 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
833 #endif
834
835 return flags;
836 }
837
838 static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
839 struct iwl_mvm_scan_params *params)
840 {
841 struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
842 struct iwl_scan_probe_req *preq =
843 (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
844 mvm->fw->ucode_capa.n_scan_channels);
845 u32 ssid_bitmap = 0;
846 int n_iterations = iwl_mvm_scan_total_iterations(params);
847
848 lockdep_assert_held(&mvm->mutex);
849
850 memset(cmd, 0, ksize(cmd));
851
852 iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
853
854 cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
855 cmd->iter_num = cpu_to_le32(1);
856 cmd->n_channels = (u8)params->n_channels;
857
858 cmd->delay = cpu_to_le32(params->delay);
859
860 cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
861
862 cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
863 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
864 MAC_FILTER_IN_BEACON);
865 iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
866 iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
867
868 /* this API uses bits 1-20 instead of 0-19 */
869 ssid_bitmap <<= 1;
870
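	/* Two schedule entries: schedule[0] runs the initial iterations and
	 * schedule[1] the remaining ones; a regular scan uses only schedule[0]
	 * with a single iteration (see the params set up by the callers).
	 */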
871 cmd->schedule[0].delay = cpu_to_le16(params->interval);
872 cmd->schedule[0].iterations = params->schedule[0].iterations;
873 cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
874 cmd->schedule[1].delay = cpu_to_le16(params->interval);
875 cmd->schedule[1].iterations = params->schedule[1].iterations;
876 cmd->schedule[1].full_scan_mul = params->schedule[1].full_scan_mul;
877
878 if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) {
879 cmd->channel_opt[0].flags =
880 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
881 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
882 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
883 cmd->channel_opt[0].non_ebs_ratio =
884 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
885 cmd->channel_opt[1].flags =
886 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
887 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
888 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
889 cmd->channel_opt[1].non_ebs_ratio =
890 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
891 }
892
893 iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
894 params->n_channels, ssid_bitmap, cmd);
895
896 *preq = params->preq;
897
898 return 0;
899 }
900
901 static int rate_to_scan_rate_flag(unsigned int rate)
902 {
903 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
904 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
905 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
906 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
907 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
908 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
909 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
910 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
911 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
912 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
913 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
914 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
915 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
916 };
917
918 return rate_to_scan_rate[rate];
919 }
920
921 static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
922 {
923 struct ieee80211_supported_band *band;
924 unsigned int rates = 0;
925 int i;
926
927 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
928 for (i = 0; i < band->n_bitrates; i++)
929 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
930 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
931 for (i = 0; i < band->n_bitrates; i++)
932 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
933
934 /* Set both basic rates and supported rates */
935 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
936
937 return cpu_to_le32(rates);
938 }
939
940 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
941 {
942
943 struct iwl_scan_config *scan_config;
944 struct ieee80211_supported_band *band;
945 int num_channels =
946 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
947 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
948 int ret, i, j = 0, cmd_size, data_size;
949 struct iwl_host_cmd cmd = {
950 .id = SCAN_CFG_CMD,
951 };
952
953 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
954 return -ENOBUFS;
955
956 cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
957
958 scan_config = kzalloc(cmd_size, GFP_KERNEL);
959 if (!scan_config)
960 return -ENOMEM;
961
962 data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
963 scan_config->hdr.size = cpu_to_le16(data_size);
964 scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
965 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
966 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
967 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
968 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
969 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
970 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
971 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
972 SCAN_CONFIG_N_CHANNELS(num_channels));
973 scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
974 scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
975 scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
976 scan_config->out_of_channel_time = cpu_to_le32(170);
977 scan_config->suspend_time = cpu_to_le32(30);
978 scan_config->dwell_active = 20;
979 scan_config->dwell_passive = 110;
980 scan_config->dwell_fragmented = 20;
981
982 memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
983
984 scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
985 scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
986 IWL_CHANNEL_FLAG_ACCURATE_EBS |
987 IWL_CHANNEL_FLAG_EBS_ADD |
988 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
989
990 band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
991 for (i = 0; i < band->n_channels; i++, j++)
992 scan_config->channel_array[j] = band->channels[i].hw_value;
993 band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
994 for (i = 0; i < band->n_channels; i++, j++)
995 scan_config->channel_array[j] = band->channels[i].hw_value;
996
997 cmd.data[0] = scan_config;
998 cmd.len[0] = cmd_size;
999 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1000
1001 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1002
1003 ret = iwl_mvm_send_cmd(mvm, &cmd);
1004
1005 kfree(scan_config);
1006 return ret;
1007 }
1008
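/* Find a scan UID slot whose status matches; a status of 0 means the slot is
 * free, so iwl_mvm_scan_uid_by_status(mvm, 0) returns a free UID for a new scan.
 */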
1009 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1010 {
1011 int i;
1012
1013 for (i = 0; i < mvm->max_scans; i++)
1014 if (mvm->scan_uid_status[i] == status)
1015 return i;
1016
1017 return -ENOENT;
1018 }
1019
1020 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1021 struct iwl_scan_req_umac *cmd,
1022 struct iwl_mvm_scan_params *params)
1023 {
1024 cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
1025 cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
1026 if (params->passive_fragmented)
1027 cmd->fragmented_dwell =
1028 params->dwell[IEEE80211_BAND_2GHZ].fragmented;
1029 cmd->max_out_time = cpu_to_le32(params->max_out_time);
1030 cmd->suspend_time = cpu_to_le32(params->suspend_time);
1031 cmd->scan_priority =
1032 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1033
1034 if (iwl_mvm_scan_total_iterations(params) == 0)
1035 cmd->ooc_priority =
1036 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1037 else
1038 cmd->ooc_priority =
1039 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
1040 }
1041
1042 static void
1043 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1044 struct ieee80211_channel **channels,
1045 int n_channels, u32 ssid_bitmap,
1046 struct iwl_scan_req_umac *cmd)
1047 {
1048 struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1049 int i;
1050
1051 for (i = 0; i < n_channels; i++) {
1052 channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1053 channel_cfg[i].channel_num = channels[i]->hw_value;
1054 channel_cfg[i].iter_count = 1;
1055 channel_cfg[i].iter_interval = 0;
1056 }
1057 }
1058
1059 static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1060 struct iwl_mvm_scan_params *params)
1061 {
1062 int flags = 0;
1063
1064 if (params->n_ssids == 0)
1065 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1066
1067 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1068 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1069
1070 if (params->passive_fragmented)
1071 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1072
1073 if (iwl_mvm_rrm_scan_needed(mvm))
1074 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1075
1076 if (params->pass_all)
1077 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1078 else
1079 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1080
1081 if (iwl_mvm_scan_total_iterations(params) > 1)
1082 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1083
1084 #ifdef CONFIG_IWLWIFI_DEBUGFS
1085 if (mvm->scan_iter_notif_enabled)
1086 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1087 #endif
1088 return flags;
1089 }
1090
1091 static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1092 struct iwl_mvm_scan_params *params,
1093 int type)
1094 {
1095 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
1096 struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1097 sizeof(struct iwl_scan_channel_cfg_umac) *
1098 mvm->fw->ucode_capa.n_scan_channels;
1099 int uid;
1100 u32 ssid_bitmap = 0;
1101 int n_iterations = iwl_mvm_scan_total_iterations(params);
1102
1103 lockdep_assert_held(&mvm->mutex);
1104
1105 uid = iwl_mvm_scan_uid_by_status(mvm, 0);
1106 if (uid < 0)
1107 return uid;
1108
1109 memset(cmd, 0, ksize(cmd));
1110 cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
1111 sizeof(struct iwl_mvm_umac_cmd_hdr));
1112
1113 iwl_mvm_scan_umac_dwell(mvm, cmd, params);
1114
1115 mvm->scan_uid_status[uid] = type;
1116
1117 cmd->uid = cpu_to_le32(uid);
1118 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1119
1120 if (iwl_mvm_scan_use_ebs(mvm, n_iterations))
1121 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1122 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1123 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1124
1125 cmd->n_channels = params->n_channels;
1126
1127 iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
1128
1129 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
1130 params->n_channels, ssid_bitmap, cmd);
1131
1132 /* With UMAC we use only one schedule for now, so use the sum
1133 * of the iterations (with a maximum of 255).
1134 */
1135 sec_part->schedule[0].iter_count =
1136 (n_iterations > 255) ? 255 : n_iterations;
1137 sec_part->schedule[0].interval = cpu_to_le16(params->interval);
1138
1139 sec_part->delay = cpu_to_le16(params->delay);
1140 sec_part->preq = params->preq;
1141
1142 return 0;
1143 }
1144
1145 static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1146 {
1147 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
1148 }
1149
1150 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1151 {
1152 /* This looks a bit arbitrary, but the idea is that if we run
1153 * out of possible simultaneous scans and the userspace is
1154 * trying to run a scan type that is already running, we
1155 * return -EBUSY. But if the userspace wants to start a
1156 * different type of scan, we stop the opposite type to make
1157 * space for the new request. The reason is backwards
1158 * compatibility with old wpa_supplicant that wouldn't stop a
1159 * scheduled scan before starting a normal scan.
1160 */
1161
1162 if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
1163 return 0;
1164
1165 /* Use a switch, even though this is a bitmask, so that more
1166 * than one bit set will fall into the default case and we will warn.
1167 */
1168 switch (type) {
1169 case IWL_MVM_SCAN_REGULAR:
1170 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1171 return -EBUSY;
1172 return iwl_mvm_sched_scan_stop(mvm, true);
1173 case IWL_MVM_SCAN_SCHED:
1174 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1175 return -EBUSY;
1176 return iwl_mvm_reg_scan_stop(mvm);
1177 case IWL_MVM_SCAN_NETDETECT:
1178 /* No need to stop anything for net-detect since the
1179 * firmware is restarted anyway. This way, any sched
1180 * scans that were running will be restarted when we
1181 * resume.
1182 */
1183 return 0;
1184 default:
1185 WARN_ON(1);
1186 break;
1187 }
1188
1189 return -EIO;
1190 }
1191
1192 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1193 struct cfg80211_scan_request *req,
1194 struct ieee80211_scan_ies *ies)
1195 {
1196 struct iwl_host_cmd hcmd = {
1197 .len = { iwl_mvm_scan_size(mvm), },
1198 .data = { mvm->scan_cmd, },
1199 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1200 };
1201 struct iwl_mvm_scan_params params = {};
1202 int ret;
1203
1204 lockdep_assert_held(&mvm->mutex);
1205
1206 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1207 IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
1208 return -EBUSY;
1209 }
1210
1211 ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
1212 if (ret)
1213 return ret;
1214
1215 iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1216
1217 /* we should have failed registration if scan_cmd was NULL */
1218 if (WARN_ON(!mvm->scan_cmd))
1219 return -ENOMEM;
1220
1221 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1222 return -ENOBUFS;
1223
1224 params.n_ssids = req->n_ssids;
1225 params.flags = req->flags;
1226 params.n_channels = req->n_channels;
1227 params.delay = 0;
1228 params.interval = 0;
1229 params.ssids = req->ssids;
1230 params.channels = req->channels;
1231 params.mac_addr = req->mac_addr;
1232 params.mac_addr_mask = req->mac_addr_mask;
1233 params.no_cck = req->no_cck;
1234 params.pass_all = true;
1235 params.n_match_sets = 0;
1236 params.match_sets = NULL;
1237
1238 params.schedule[0].iterations = 1;
1239 params.schedule[0].full_scan_mul = 0;
1240 params.schedule[1].iterations = 0;
1241 params.schedule[1].full_scan_mul = 0;
1242
1243 iwl_mvm_scan_calc_dwell(mvm, vif, &params);
1244
1245 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1246
1247 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
1248 hcmd.id = SCAN_REQ_UMAC;
1249 ret = iwl_mvm_scan_umac(mvm, vif, &params,
1250 IWL_MVM_SCAN_REGULAR);
1251 } else {
1252 hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1253 ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1254 }
1255
1256 if (ret)
1257 return ret;
1258
1259 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1260 if (!ret) {
1261 IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
1262 mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
1263 } else {
1264 /* If the scan failed, it usually means that the FW was unable
1265 * to allocate the time events. Warn on it, but maybe we
1266 * should try to send the command again with different params.
1267 */
1268 IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
1269 }
1270
1271 if (ret)
1272 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1273
1274 return ret;
1275 }
1276
1277 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1278 struct ieee80211_vif *vif,
1279 struct cfg80211_sched_scan_request *req,
1280 struct ieee80211_scan_ies *ies,
1281 int type)
1282 {
1283 struct iwl_host_cmd hcmd = {
1284 .len = { iwl_mvm_scan_size(mvm), },
1285 .data = { mvm->scan_cmd, },
1286 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1287 };
1288 struct iwl_mvm_scan_params params = {};
1289 int ret;
1290
1291 lockdep_assert_held(&mvm->mutex);
1292
1293 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1294 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
1295 return -EBUSY;
1296 }
1297
1298 ret = iwl_mvm_check_running_scans(mvm, type);
1299 if (ret)
1300 return ret;
1301
1302 /* we should have failed registration if scan_cmd was NULL */
1303 if (WARN_ON(!mvm->scan_cmd))
1304 return -ENOMEM;
1305
1306 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1307 return -ENOBUFS;
1308
1309 params.n_ssids = req->n_ssids;
1310 params.flags = req->flags;
1311 params.n_channels = req->n_channels;
1312 params.ssids = req->ssids;
1313 params.channels = req->channels;
1314 params.mac_addr = req->mac_addr;
1315 params.mac_addr_mask = req->mac_addr_mask;
1316 params.no_cck = false;
1317 params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
1318 params.n_match_sets = req->n_match_sets;
1319 params.match_sets = req->match_sets;
1320
1321 params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
1322 params.schedule[0].full_scan_mul = 1;
1323 params.schedule[1].iterations = 0xff;
1324 params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
1325
1326 if (req->interval > U16_MAX) {
1327 IWL_DEBUG_SCAN(mvm,
1328 "interval value is > 16-bits, set to max possible\n");
1329 params.interval = U16_MAX;
1330 } else {
1331 params.interval = req->interval / MSEC_PER_SEC;
1332 }
1333
1334 /* In theory, LMAC scans can handle a 32-bit delay, but since
1335 * waiting for over 18 hours to start the scan is a bit silly
1336 * and to keep it aligned with UMAC scans (which only support
1337 * 16-bit delays), trim it down to 16-bits.
1338 */
1339 if (req->delay > U16_MAX) {
1340 IWL_DEBUG_SCAN(mvm,
1341 "delay value is > 16-bits, set to max possible\n");
1342 params.delay = U16_MAX;
1343 } else {
1344 params.delay = req->delay;
1345 }
1346
1347 iwl_mvm_scan_calc_dwell(mvm, vif, &params);
1348
1349 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1350 if (ret)
1351 return ret;
1352
1353 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1354
1355 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
1356 hcmd.id = SCAN_REQ_UMAC;
1357 ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
1358 } else {
1359 hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1360 ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1361 }
1362
1363 if (ret)
1364 return ret;
1365
1366 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1367 if (!ret) {
1368 IWL_DEBUG_SCAN(mvm,
1369 "Sched scan request was sent successfully\n");
1370 mvm->scan_status |= type;
1371 } else {
1372 /* If the scan failed, it usually means that the FW was unable
1373 * to allocate the time events. Warn on it, but maybe we
1374 * should try to send the command again with different params.
1375 */
1376 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
1377 }
1378
1379 return ret;
1380 }
1381
1382 int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1383 struct iwl_rx_cmd_buffer *rxb,
1384 struct iwl_device_cmd *cmd)
1385 {
1386 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1387 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1388 u32 uid = __le32_to_cpu(notif->uid);
1389 bool sched = (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED);
1390
1391 /* the status may already be zero in case of a scan abort from above */
1392 if (mvm->scan_uid_status[uid] == 0)
1393 return 0;
1394
1395 IWL_DEBUG_SCAN(mvm,
1396 "Scan completed, uid %u type %s, status %s, EBS status %s\n",
1397 uid, sched ? "sched" : "regular",
1398 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1399 "completed" : "aborted",
1400 notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
1401 "success" : "failed");
1402
1403 if (notif->ebs_status)
1404 mvm->last_ebs_successful = false;
1405
1406 mvm->scan_uid_status[uid] = 0;
1407
1408 if (!sched) {
1409 ieee80211_scan_completed(mvm->hw,
1410 notif->status ==
1411 IWL_SCAN_OFFLOAD_ABORTED);
1412 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1413 } else if (iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED) < 0) {
1414 ieee80211_sched_scan_stopped(mvm->hw);
1415 } else {
1416 IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
1417 }
1418
1419 return 0;
1420 }
1421
1422 int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1423 struct iwl_rx_cmd_buffer *rxb,
1424 struct iwl_device_cmd *cmd)
1425 {
1426 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1427 struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
1428 u8 buf[256];
1429
1430 IWL_DEBUG_SCAN(mvm,
1431 "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
1432 notif->status, notif->scanned_channels,
1433 iwl_mvm_dump_channel_list(notif->results,
1434 notif->scanned_channels, buf,
1435 sizeof(buf)));
1436 return 0;
1437 }
1438
1439 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
1440 {
1441 struct iwl_umac_scan_abort cmd = {
1442 .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
1443 sizeof(struct iwl_mvm_umac_cmd_hdr)),
1444 };
1445 int uid, ret;
1446
1447 lockdep_assert_held(&mvm->mutex);
1448
1449 /* We should always get a valid index here, because we already
1450 * checked that this type of scan was running in the generic
1451 * code.
1452 */
1453 uid = iwl_mvm_scan_uid_by_status(mvm, type);
1454 if (WARN_ON_ONCE(uid < 0))
1455 return uid;
1456
1457 cmd.uid = cpu_to_le32(uid);
1458
1459 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
1460
1461 ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
1462 if (!ret)
1463 mvm->scan_uid_status[uid] = 0;
1464
1465 return ret;
1466 }
1467
1468 static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type)
1469 {
1470 struct iwl_notification_wait wait_scan_done;
1471 static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
1472 int ret;
1473
1474 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
1475 scan_done_notif,
1476 ARRAY_SIZE(scan_done_notif),
1477 NULL, NULL);
1478
1479 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
1480
1481 ret = iwl_mvm_umac_scan_abort(mvm, type);
1482 if (ret) {
1483 IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
1484 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1485 return ret;
1486 }
1487
1488 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
1489
1490 return ret;
1491 }
1492
1493 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
1494 {
1495 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
1496 return sizeof(struct iwl_scan_req_umac) +
1497 sizeof(struct iwl_scan_channel_cfg_umac) *
1498 mvm->fw->ucode_capa.n_scan_channels +
1499 sizeof(struct iwl_scan_req_umac_tail);
1500
1501 return sizeof(struct iwl_scan_req_lmac) +
1502 sizeof(struct iwl_scan_channel_cfg_lmac) *
1503 mvm->fw->ucode_capa.n_scan_channels +
1504 sizeof(struct iwl_scan_probe_req);
1505 }
1506
1507 /*
1508 * This function is used in the NIC restart flow to inform mac80211 about
1509 * scans that were aborted by the restart flow or by a firmware assert.
1510 */
1511 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
1512 {
1513 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
1514 int uid, i;
1515
1516 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
1517 if (uid >= 0) {
1518 ieee80211_scan_completed(mvm->hw, true);
1519 mvm->scan_uid_status[uid] = 0;
1520 }
1521 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
1522 if (uid >= 0 && !mvm->restart_fw) {
1523 ieee80211_sched_scan_stopped(mvm->hw);
1524 mvm->scan_uid_status[uid] = 0;
1525 }
1526
1527 /* We shouldn't have any UIDs still set. Loop over all the
1528 * UIDs to make sure there's nothing left there and warn if
1529 * any is found.
1530 */
1531 for (i = 0; i < mvm->max_scans; i++) {
1532 if (WARN_ONCE(mvm->scan_uid_status[i],
1533 "UMAC scan UID %d status was not cleaned\n",
1534 i))
1535 mvm->scan_uid_status[i] = 0;
1536 }
1537 } else {
1538 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
1539 ieee80211_scan_completed(mvm->hw, true);
1540
1541 /* Sched scan will be restarted by mac80211 in
1542 * restart_hw, so do not report if FW is about to be
1543 * restarted.
1544 */
1545 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
1546 ieee80211_sched_scan_stopped(mvm->hw);
1547 }
1548 }
1549
1550 int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm)
1551 {
1552 int ret;
1553
1554 if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR))
1555 return 0;
1556
1557 if (iwl_mvm_is_radio_killed(mvm)) {
1558 ret = 0;
1559 goto out;
1560 }
1561
1562 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
1563 ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR);
1564 else
1565 ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR);
1566
1567 if (!ret)
1568 mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;
1569 out:
1570 /* Clear the scan status so the next scan requests will
1571 * succeed and mark the scan as stopping, so that the Rx
1572 * handler doesn't do anything, as the scan was stopped from
1573 * above. Since the rx handler won't do anything now, we have
1574 * to release the scan reference here.
1575 */
1576 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1577
1578 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
1579 ieee80211_scan_completed(mvm->hw, true);
1580
1581 return ret;
1582 }
1583
1584 int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
1585 {
1586 int ret;
1587
1588 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED))
1589 return 0;
1590
1591 if (iwl_mvm_is_radio_killed(mvm)) {
1592 ret = 0;
1593 goto out;
1594 }
1595
1596 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
1597 ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED);
1598 else
1599 ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED);
1600
1601 if (!ret)
1602 mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED;
1603 out:
1604 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
1605 if (notify)
1606 ieee80211_sched_scan_stopped(mvm->hw);
1607
1608 return ret;
1609 }