wlcore: enable beacon filtering only after receiving a beacon
[deliverable/linux.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
#define WL1271_BOOT_RETRIES 3

/*
 * Module parameters. -1 (or NULL for fwlog_param) means "not set on the
 * command line"; wlcore_adjust_conf() only overrides the compiled-in
 * defaults when a value was explicitly provided.
 */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;

/* forward declarations for helpers defined later in this file */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
83 int i;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
86
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
91 continue;
92
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
95
96 }
97
98 wlcore_regdomain_config(wl);
99 }
100
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
102 bool enable)
103 {
104 int ret = 0;
105
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
108 if (ret < 0)
109 goto out;
110
111 if (enable)
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 else
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 out:
116 return ret;
117 }
118
/*
 * This function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled.
 * Caller is expected to hold wl->mutex (see wl1271_set_rx_streaming).
 * Returns 0 on success or a negative error code.
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
	else {
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wlvif->rx_streaming_timer);
	}
out:
	return ret;
}
146
/*
 * Work item: enable RX streaming for a vif. Bails out when streaming is
 * already started, the STA is not associated, or the configuration does
 * not call for streaming; otherwise wakes the chip, enables streaming
 * and arms the inactivity timer that will later disable it.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* streaming is wanted only when "always" is configured or while
	 * soft-gemini (BT coexistence) is active */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* interval 0 means rx streaming is disabled */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
182
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
184 {
185 int ret;
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
189
190 mutex_lock(&wl->mutex);
191
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
193 goto out;
194
195 ret = wl1271_ps_elp_wakeup(wl);
196 if (ret < 0)
197 goto out;
198
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
200 if (ret)
201 goto out_sleep;
202
203 out_sleep:
204 wl1271_ps_elp_sleep(wl);
205 out:
206 mutex_unlock(&wl->mutex);
207 }
208
209 static void wl1271_rx_streaming_timer(unsigned long data)
210 {
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
221 return;
222
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227
/*
 * TX watchdog: fires when the FW released no TX blocks for
 * tx_watchdog_timeout ms. Benign causes (remain-on-channel, scan,
 * AP buffering frames for PS stations) just re-arm the watchdog;
 * otherwise the FW is considered stuck and recovery is started.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/* no benign explanation - assume the FW is stuck */
	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
289
290 static void wlcore_adjust_conf(struct wl1271 *wl)
291 {
292 /* Adjust settings according to optional module parameters */
293
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
299 } else {
300 wl1271_error(
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
303 }
304 }
305
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 }
320 }
321
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
328
/*
 * Regulate high-level (host) power-save for one AP link based on the
 * FW's per-link PS bitmap and the number of packets queued in the FW
 * for that link.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	/* FW reports per-link PS state as a bitmap indexed by hlid */
	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
357
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status *status)
361 {
362 u32 cur_fw_ps_map;
363 u8 hlid;
364
365 cur_fw_ps_map = status->link_ps_bitmap;
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
371
372 wl->ap_fw_ps_map = cur_fw_ps_map;
373 }
374
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
378 }
379
/*
 * Read the FW status area over the bus and process it: update
 * per-queue and per-link TX packet accounting, the watchdog, the
 * available TX block counts and the host-chipset time offset.
 * Called with wl->mutex held. Returns 0 or a negative bus error.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* translate the chip-specific raw layout into the generic struct */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		/* 32-bit FW counter wrapped; compute the true delta */
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
488
489 static void wl1271_flush_deferred_work(struct wl1271 *wl)
490 {
491 struct sk_buff *skb;
492
493 /* Pass all received frames to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
495 ieee80211_rx_ni(wl->hw, skb);
496
497 /* Return sent skbs to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
499 ieee80211_tx_status_ni(wl->hw, skb);
500 }
501
502 static void wl1271_netstack_work(struct work_struct *work)
503 {
504 struct wl1271 *wl =
505 container_of(work, struct wl1271, netstack_work);
506
507 do {
508 wl1271_flush_deferred_work(wl);
509 } while (skb_queue_len(&wl->deferred_rx_queue));
510 }
511
/* upper bound on status-read iterations per invocation */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt service routine, called with wl->mutex held. Reads the
 * FW status in a loop (bounded by WL1271_IRQ_MAX_LOOPS, or a single pass
 * for edge-triggered IRQ platforms), dispatching RX, TX completions and
 * firmware events until no interrupt bits remain.
 * Returns 0 on success or a negative error; -EIO (watchdog) makes the
 * caller start recovery.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no interrupt bits left - we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
642
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the
 * handling while suspended (marking it pending and disabling the IRQ),
 * otherwise services the interrupt under wl->mutex and queues recovery
 * if servicing failed. TX work is handled inline here when possible to
 * avoid a redundant work-queue round trip.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
690
/*
 * Accumulator for wl12xx_vif_count_iter(): counts active interfaces
 * and records whether cur_vif was among the iterated ones.
 */
struct vif_counter_data {
	u8 counter;		/* number of active interfaces seen */

	struct ieee80211_vif *cur_vif;		/* vif to look for */
	bool cur_vif_running;	/* set if cur_vif was encountered */
};
697
698 static void wl12xx_vif_count_iter(void *data, u8 *mac,
699 struct ieee80211_vif *vif)
700 {
701 struct vif_counter_data *counter = data;
702
703 counter->counter++;
704 if (counter->cur_vif == vif)
705 counter->cur_vif_running = true;
706 }
707
708 /* caller must not hold wl->mutex, as it might deadlock */
709 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
710 struct ieee80211_vif *cur_vif,
711 struct vif_counter_data *data)
712 {
713 memset(data, 0, sizeof(*data));
714 data->cur_vif = cur_vif;
715
716 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
717 wl12xx_vif_count_iter, data);
718 }
719
720 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
721 {
722 const struct firmware *fw;
723 const char *fw_name;
724 enum wl12xx_fw_type fw_type;
725 int ret;
726
727 if (plt) {
728 fw_type = WL12XX_FW_TYPE_PLT;
729 fw_name = wl->plt_fw_name;
730 } else {
731 /*
732 * we can't call wl12xx_get_vif_count() here because
733 * wl->mutex is taken, so use the cached last_vif_count value
734 */
735 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
736 fw_type = WL12XX_FW_TYPE_MULTI;
737 fw_name = wl->mr_fw_name;
738 } else {
739 fw_type = WL12XX_FW_TYPE_NORMAL;
740 fw_name = wl->sr_fw_name;
741 }
742 }
743
744 if (wl->fw_type == fw_type)
745 return 0;
746
747 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
748
749 ret = request_firmware(&fw, fw_name, wl->dev);
750
751 if (ret < 0) {
752 wl1271_error("could not get firmware %s: %d", fw_name, ret);
753 return ret;
754 }
755
756 if (fw->size % 4) {
757 wl1271_error("firmware size is not multiple of 32 bits: %zu",
758 fw->size);
759 ret = -EILSEQ;
760 goto out;
761 }
762
763 vfree(wl->fw);
764 wl->fw_type = WL12XX_FW_TYPE_NONE;
765 wl->fw_len = fw->size;
766 wl->fw = vmalloc(wl->fw_len);
767
768 if (!wl->fw) {
769 wl1271_error("could not allocate memory for the firmware");
770 ret = -ENOMEM;
771 goto out;
772 }
773
774 memcpy(wl->fw, fw->data, wl->fw_len);
775 ret = 0;
776 wl->fw_type = fw_type;
777 out:
778 release_firmware(fw);
779
780 return ret;
781 }
782
/*
 * Schedule the recovery worker. Guarded against recursive recovery:
 * only acts when the state is still ON, and moves the state to
 * RESTARTING first so a second call becomes a no-op.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		/* warn if this recovery was not flagged as intended */
		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
				  &wl->flags));

		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		wl1271_ps_elp_wakeup(wl);
		wlcore_disable_interrupts_nosync(wl);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
797
798 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
799 {
800 size_t len;
801
802 /* Make sure we have enough room */
803 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
804
805 /* Fill the FW log file, consumed by the sysfs fwlog entry */
806 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
807 wl->fwlog_size += len;
808
809 return len;
810 }
811
/*
 * Retrieve the firmware panic log. Walks the FW's linked list of log
 * memory blocks (each block starts with the HW address of the next),
 * copying the payload into the host fwlog buffer, then wakes any
 * readers waiting on the sysfs fwlog entry. The chip partition is
 * temporarily remapped per block and restored at the end.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	/* nothing to read when the fwlog feature is off or unsupported */
	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status);
	if (ret < 0)
		goto out;

	addr = wl->fw_status->log_start_addr;
	if (!addr)
		goto out;

	/* skip the per-block header; its size depends on the log mode */
	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size  = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	wlcore_set_partition(wl, &old_part);
}
900
901 static void wlcore_print_recovery(struct wl1271 *wl)
902 {
903 u32 pc = 0;
904 u32 hint_sts = 0;
905 int ret;
906
907 wl1271_info("Hardware recovery in progress. FW ver: %s",
908 wl->chip.fw_ver_str);
909
910 /* change partitions momentarily so we can read the FW pc */
911 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
912 if (ret < 0)
913 return;
914
915 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
916 if (ret < 0)
917 return;
918
919 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
920 if (ret < 0)
921 return;
922
923 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
924 pc, hint_sts, ++wl->recovery_count);
925
926 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
927 }
928
929
/*
 * Recovery worker: dump diagnostics (fwlog, FW pc), tear down all
 * interfaces, stop the low-level driver and ask mac80211 to restart
 * the hardware. Honors the no_recovery/bug_on_recovery module
 * parameters.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	/* only dump diagnostics for unexpected (not driver-initiated)
	 * recoveries */
	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
982
/* Write the ELP control register to wake the firmware from low power. */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
987
988 static int wl1271_setup(struct wl1271 *wl)
989 {
990 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
991 if (!wl->raw_fw_status)
992 goto err;
993
994 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
995 if (!wl->fw_status)
996 goto err;
997
998 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
999 if (!wl->tx_res_if)
1000 goto err;
1001
1002 return 0;
1003 err:
1004 kfree(wl->fw_status);
1005 kfree(wl->raw_fw_status);
1006 return -ENOMEM;
1007 }
1008
/*
 * Power the chip on and prepare it for booting: reset/init the IO,
 * map the BOOT partition and wake the FW from ELP. On failure after
 * power-up the chip is powered back off.
 * Returns 0 on success or a negative error code.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle time before toggling power (values from platform docs) */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1037
/*
 * Power on the chip, configure the bus block size, allocate the
 * driver's status buffers and fetch the right firmware image.
 * @plt selects the PLT (production line test) firmware.
 * Returns 0 on success or a negative error code.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1073
1074 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1075 {
1076 int retries = WL1271_BOOT_RETRIES;
1077 struct wiphy *wiphy = wl->hw->wiphy;
1078
1079 static const char* const PLT_MODE[] = {
1080 "PLT_OFF",
1081 "PLT_ON",
1082 "PLT_FEM_DETECT",
1083 "PLT_CHIP_AWAKE"
1084 };
1085
1086 int ret;
1087
1088 mutex_lock(&wl->mutex);
1089
1090 wl1271_notice("power up");
1091
1092 if (wl->state != WLCORE_STATE_OFF) {
1093 wl1271_error("cannot go into PLT state because not "
1094 "in off state: %d", wl->state);
1095 ret = -EBUSY;
1096 goto out;
1097 }
1098
1099 /* Indicate to lower levels that we are now in PLT mode */
1100 wl->plt = true;
1101 wl->plt_mode = plt_mode;
1102
1103 while (retries) {
1104 retries--;
1105 ret = wl12xx_chip_wakeup(wl, true);
1106 if (ret < 0)
1107 goto power_off;
1108
1109 if (plt_mode != PLT_CHIP_AWAKE) {
1110 ret = wl->ops->plt_init(wl);
1111 if (ret < 0)
1112 goto power_off;
1113 }
1114
1115 wl->state = WLCORE_STATE_ON;
1116 wl1271_notice("firmware booted in PLT mode %s (%s)",
1117 PLT_MODE[plt_mode],
1118 wl->chip.fw_ver_str);
1119
1120 /* update hw/fw version info in wiphy struct */
1121 wiphy->hw_version = wl->chip.id;
1122 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1123 sizeof(wiphy->fw_version));
1124
1125 goto out;
1126
1127 power_off:
1128 wl1271_power_off(wl);
1129 }
1130
1131 wl->plt = false;
1132 wl->plt_mode = PLT_OFF;
1133
1134 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1135 WL1271_BOOT_RETRIES);
1136 out:
1137 mutex_unlock(&wl->mutex);
1138
1139 return ret;
1140 }
1141
/*
 * Leave PLT mode and power the chip down. Fails with -EBUSY if the
 * device is not currently in PLT mode. Returns 0 on success.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* flush pending deferred work and stop all workers before
	 * powering off (they must not run against a dead chip) */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1192
/*
 * mac80211 TX entry point: enqueue a frame on the per-link/per-AC queue
 * and kick the TX work.  Frames with no vif or an invalid/stopped link
 * are dropped (freed) here.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* mac80211 may hand us frames without a vif (e.g. during teardown) */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	/* resolve the firmware host link id for this frame */
	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* both global and per-vif counters track queue depth */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	/* only schedule TX work if the FW can accept frames right now */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1265
/*
 * Queue the pre-allocated dummy packet (used to release FW RX memory
 * blocks).  If one is already pending this is a no-op; if the FW TX
 * path is idle, the TX work is run inline to get it out as soon as
 * possible.  Returns 0 or an error from wlcore_tx_work_locked().
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	/* flag + counter must be updated atomically w.r.t. the TX path */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1292
1293 /*
1294 * The size of the dummy packet should be at least 1400 bytes. However, in
1295 * order to minimize the number of bus transactions, aligning it to 512 bytes
1296 * boundaries could be beneficial, performance wise
1297 */
1298 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1299
1300 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1301 {
1302 struct sk_buff *skb;
1303 struct ieee80211_hdr_3addr *hdr;
1304 unsigned int dummy_packet_size;
1305
1306 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1307 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1308
1309 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1310 if (!skb) {
1311 wl1271_warning("Failed to allocate a dummy packet skb");
1312 return NULL;
1313 }
1314
1315 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1316
1317 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1318 memset(hdr, 0, sizeof(*hdr));
1319 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1320 IEEE80211_STYPE_NULLFUNC |
1321 IEEE80211_FCTL_TODS);
1322
1323 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1324
1325 /* Dummy packets require the TID to be management */
1326 skb->priority = WL1271_TID_MGMT;
1327
1328 /* Initialize all fields that might be used */
1329 skb_set_queue_mapping(skb, 0);
1330 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1331
1332 return skb;
1333 }
1334
1335
1336 #ifdef CONFIG_PM
/*
 * Check that a WoWLAN pattern can be expressed within the FW RX-filter
 * limits (max field count and max total field-buffer size).
 * Returns 0 if it fits, -EINVAL/-E2BIG otherwise.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new contiguous segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				/* segment straddling the eth/IP boundary
				 * must be split into two fields */
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			/* masked-out byte closes the current segment */
			if (in_field) {
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* account for a segment still open at the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1398
1399 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1400 {
1401 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1402 }
1403
1404 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1405 {
1406 int i;
1407
1408 if (filter == NULL)
1409 return;
1410
1411 for (i = 0; i < filter->num_fields; i++)
1412 kfree(filter->fields[i].pattern);
1413
1414 kfree(filter);
1415 }
1416
1417 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1418 u16 offset, u8 flags,
1419 u8 *pattern, u8 len)
1420 {
1421 struct wl12xx_rx_filter_field *field;
1422
1423 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1424 wl1271_warning("Max fields per RX filter. can't alloc another");
1425 return -EINVAL;
1426 }
1427
1428 field = &filter->fields[filter->num_fields];
1429
1430 field->pattern = kzalloc(len, GFP_KERNEL);
1431 if (!field->pattern) {
1432 wl1271_warning("Failed to allocate RX filter pattern");
1433 return -ENOMEM;
1434 }
1435
1436 filter->num_fields++;
1437
1438 field->offset = cpu_to_le16(offset);
1439 field->flags = flags;
1440 field->len = len;
1441 memcpy(field->pattern, pattern, len);
1442
1443 return 0;
1444 }
1445
1446 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1447 {
1448 int i, fields_size = 0;
1449
1450 for (i = 0; i < filter->num_fields; i++)
1451 fields_size += filter->fields[i].len +
1452 sizeof(struct wl12xx_rx_filter_field) -
1453 sizeof(u8 *);
1454
1455 return fields_size;
1456 }
1457
1458 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1459 u8 *buf)
1460 {
1461 int i;
1462 struct wl12xx_rx_filter_field *field;
1463
1464 for (i = 0; i < filter->num_fields; i++) {
1465 field = (struct wl12xx_rx_filter_field *)buf;
1466
1467 field->offset = filter->fields[i].offset;
1468 field->flags = filter->fields[i].flags;
1469 field->len = filter->fields[i].len;
1470
1471 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1472 buf += sizeof(struct wl12xx_rx_filter_field) -
1473 sizeof(u8 *) + field->len;
1474 }
1475 }
1476
1477 /*
1478 * Allocates an RX filter returned through f
1479 * which needs to be freed using rx_filter_free()
1480 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Walk the mask: i marks the start of a contiguous run of
	 * masked-in bytes, j scans forward to its end.  A run is also
	 * cut at the Ethernet/IP header boundary, since the FW
	 * addresses the two headers with different field flags.
	 */
	i = 0;
	while (i < p->pattern_len) {
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* split runs that cross the eth/IP boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* field offsets are relative to the header they live in */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	/* a matching frame should wake the host */
	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1544
/*
 * Program the FW RX filters from a WoWLAN configuration.
 *
 * With no usable patterns (wow == NULL, "any", or zero patterns) the
 * default filter is set to wake on everything and all pattern filters
 * are cleared.  Otherwise each pattern becomes an enabled filter and
 * the default action is flipped to DROP, so only matches wake us.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		/* no patterns: signal on everything, clear old filters */
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	/* start from a clean FW filter state */
	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the FW keeps its own copy; ours can go either way */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop everything that no pattern matched */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1609
/*
 * Prepare a STA vif for suspend: program WoWLAN filters and, when the
 * suspend wake-up configuration differs from the normal one, switch the
 * FW wake-up conditions.  No-op for unassociated STAs.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out_sleep;

	/* skip reprogramming if suspend settings equal runtime settings */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out_sleep;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	return ret;

}
1646
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 struct wl12xx_vif *wlvif)
1649 {
1650 int ret = 0;
1651
1652 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1653 goto out;
1654
1655 ret = wl1271_ps_elp_wakeup(wl);
1656 if (ret < 0)
1657 goto out;
1658
1659 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1660
1661 wl1271_ps_elp_sleep(wl);
1662 out:
1663 return ret;
1664
1665 }
1666
1667 static int wl1271_configure_suspend(struct wl1271 *wl,
1668 struct wl12xx_vif *wlvif,
1669 struct cfg80211_wowlan *wow)
1670 {
1671 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1672 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1673 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1674 return wl1271_configure_suspend_ap(wl, wlvif);
1675 return 0;
1676 }
1677
/*
 * Undo per-vif suspend configuration on resume: clear WoWLAN filters
 * and restore runtime wake-up conditions for STAs; disable beacon
 * filtering for APs.  Errors are logged but not propagated.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	/* nothing was configured at suspend for unassociated STAs */
	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		/* NULL wow: disable WoWLAN filtering entirely */
		wl1271_configure_wowlan(wl, NULL);

		/* if suspend never changed them, nothing to restore */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			goto out_sleep;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
}
1718
/*
 * mac80211 suspend handler: configure every vif for (WoW) suspend,
 * then flush all deferred work so nothing touches the HW afterwards.
 * Returns -EBUSY to postpone suspend while a recovery is pending.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	/*
	 * Cancel the watchdog even if above tx_flush failed. We will detect
	 * it on resume anyway.
	 */
	cancel_delayed_work(&wl->tx_watchdog_work);

	return 0;
}
1775
/*
 * mac80211 resume handler: re-enable irq work (running any work that
 * was postponed while suspended), requeue a pending recovery, and undo
 * the per-vif suspend configuration.  Always returns 0.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out;
	}

	/* restore runtime configuration on each vif */
	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1841 #endif
1842
/*
 * mac80211 start handler.  Intentionally does nothing: the real HW
 * bring-up is deferred to interface addition (see comment below).
 */
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */

	return 0;
}
1860
/*
 * Stop the core with wl->mutex held: disable interrupts, cancel all
 * deferred work (dropping the mutex while doing so), power off, and
 * reset all software state to its boot defaults.  Safe to call when
 * already off — then it only balances a recovery interrupt-disable.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* balance the disable done when recovery was scheduled */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	/* drop the mutex so the work functions being cancelled can finish */
	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all software state to boot defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* free FW status / TX result buffers allocated at boot */
	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
1969
/* mac80211 stop handler: take the mutex and run the locked stop path. */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
1982
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report failure to mac80211 and tell the FW to abort the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* tell mac80211 the switch did not succeed */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2019
/*
 * Delayed work that escalates a prolonged beacon loss into a full
 * connection-loss notification to mac80211.  Skipped if the core has
 * gone down or the STA is no longer associated.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2047
/*
 * Delayed work that cancels the remain-on-channel kept for a pending
 * station authentication once the auth-reply timeout has truly elapsed.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2088
2089 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2090 {
2091 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2092 WL12XX_MAX_RATE_POLICIES);
2093 if (policy >= WL12XX_MAX_RATE_POLICIES)
2094 return -EBUSY;
2095
2096 __set_bit(policy, wl->rate_policies_map);
2097 *idx = policy;
2098 return 0;
2099 }
2100
2101 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2102 {
2103 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2104 return;
2105
2106 __clear_bit(*idx, wl->rate_policies_map);
2107 *idx = WL12XX_MAX_RATE_POLICIES;
2108 }
2109
2110 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2111 {
2112 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2113 WLCORE_MAX_KLV_TEMPLATES);
2114 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2115 return -EBUSY;
2116
2117 __set_bit(policy, wl->klv_templates_map);
2118 *idx = policy;
2119 return 0;
2120 }
2121
2122 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2123 {
2124 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2125 return;
2126
2127 __clear_bit(*idx, wl->klv_templates_map);
2128 *idx = WLCORE_MAX_KLV_TEMPLATES;
2129 }
2130
2131 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2132 {
2133 switch (wlvif->bss_type) {
2134 case BSS_TYPE_AP_BSS:
2135 if (wlvif->p2p)
2136 return WL1271_ROLE_P2P_GO;
2137 else
2138 return WL1271_ROLE_AP;
2139
2140 case BSS_TYPE_STA_BSS:
2141 if (wlvif->p2p)
2142 return WL1271_ROLE_P2P_CL;
2143 else
2144 return WL1271_ROLE_STA;
2145
2146 case BSS_TYPE_IBSS:
2147 return WL1271_ROLE_IBSS;
2148
2149 default:
2150 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2151 }
2152 return WL12XX_INVALID_ROLE_TYPE;
2153 }
2154
/*
 * Initialize the driver-private per-vif data for a newly added
 * interface: bss type, role ids, rate policies, default rates and all
 * per-vif work items.  Returns 0 or -EOPNOTSUPP for unsupported
 * interface types.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* roles/links are allocated later; start out invalid */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	/* per-vif deferred work items */
	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2248
/*
 * Boot the chip and firmware for normal (non-PLT) operation, retrying
 * up to WL1271_BOOT_RETRIES times.  On success, publishes hw/fw version
 * info to wiphy, disables 5 GHz channels if 11a is unsupported, and
 * moves the core to WLCORE_STATE_ON.  Called with wl->mutex held.
 */
static int wl12xx_init_fw(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	bool booted = false;
	struct wiphy *wiphy = wl->hw->wiphy;
	int ret;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, false);
		if (ret < 0)
			goto power_off;

		ret = wl->ops->boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_hw_init(wl);
		if (ret < 0)
			goto irq_disable;

		booted = true;
		break;

irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WLCORE_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wlcore_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
	}

	if (!booted) {
		wl1271_error("firmware boot failed despite %d retries",
			     WL1271_BOOT_RETRIES);
		goto out;
	}

	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	/*
	 * NOTE(review): strncpy() does not guarantee NUL termination if
	 * fw_ver_str fills the buffer — presumably it is always shorter;
	 * confirm.
	 */
	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

	wl->state = WLCORE_STATE_ON;
out:
	return ret;
}
2317
2318 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2319 {
2320 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2321 }
2322
2323 /*
2324 * Check whether a fw switch (i.e. moving from one loaded
2325 * fw to another) is needed. This function is also responsible
2326 * for updating wl->last_vif_count, so it must be called before
2327 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2328 * will be used).
2329 */
2330 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2331 struct vif_counter_data vif_counter_data,
2332 bool add)
2333 {
2334 enum wl12xx_fw_type current_fw = wl->fw_type;
2335 u8 vif_count = vif_counter_data.counter;
2336
2337 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2338 return false;
2339
2340 /* increase the vif count if this is a new vif */
2341 if (add && !vif_counter_data.cur_vif_running)
2342 vif_count++;
2343
2344 wl->last_vif_count = vif_count;
2345
2346 /* no need for fw change if the device is OFF */
2347 if (wl->state == WLCORE_STATE_OFF)
2348 return false;
2349
2350 /* no need for fw change if a single fw is used */
2351 if (!wl->mr_fw_name)
2352 return false;
2353
2354 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2355 return true;
2356 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2357 return true;
2358
2359 return false;
2360 }
2361
2362 /*
2363 * Enter "forced psm". Make sure the sta is in psm against the ap,
2364 * to make the fw switch a bit more disconnection-persistent.
2365 */
2366 static void wl12xx_force_active_psm(struct wl1271 *wl)
2367 {
2368 struct wl12xx_vif *wlvif;
2369
2370 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2371 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2372 }
2373 }
2374
/* Context for wlcore_hw_queue_iter(): which hw-queue bases are in use. */
struct wlcore_hw_queue_iter_data {
	/* one bit per possible hw-queue base (one base per MAC address) */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2382
2383 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2384 struct ieee80211_vif *vif)
2385 {
2386 struct wlcore_hw_queue_iter_data *iter_data = data;
2387
2388 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2389 return;
2390
2391 if (iter_data->cur_running || vif == iter_data->vif) {
2392 iter_data->cur_running = true;
2393 return;
2394 }
2395
2396 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2397 }
2398
/*
 * Allocate a block of NUM_TX_QUEUES mac80211 hw queues for this vif.
 *
 * On resume/recovery the vif already owns queues in mac80211, so they are
 * reused; otherwise the first free base is claimed. AP vifs additionally
 * get a per-interface cab (content-after-beacon) queue from the reserved
 * range at the end. Returns 0 or -EBUSY when all bases are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* fresh base: no stale stop reasons may linger */
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2449
/*
 * mac80211 add_interface callback: allocate hw queues, boot the firmware
 * if needed, enable the role and run per-vif init. May trigger an
 * intended fw recovery when a single-role <-> multi-role switch is
 * required, in which case it returns 0 and the vif is re-added during
 * recovery.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	/* beacon filtering and CQM RSSI events are handled by the fw */
	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/* need a different fw image? restart via intended recovery */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		/* run synchronously; recovery re-adds this vif */
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	/* only now is the vif visible to the rest of the driver */
	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2551
/*
 * Tear down one vif: abort its scan/ROC, disable its role in the fw
 * (unless a recovery is in progress), release per-vif resources and
 * update the ap/sta counters and fw sleep-auth accordingly.
 *
 * Called with wl->mutex held; the mutex is dropped temporarily at the
 * end to cancel the per-vif works without deadlocking against them.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		/* report the scan as aborted to mac80211 */
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	mutex_unlock(&wl->mutex);

	/* drop the lock while draining per-vif works (they take it too) */
	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2686
/*
 * mac80211 remove_interface callback: tear the vif down (if it is still
 * tracked) and queue an intended fw recovery when the remaining vif
 * count calls for a different firmware image.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		/* only remove the vif if it is still on our list */
		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2722
/*
 * mac80211 change_interface callback: implemented as remove + add of the
 * same vif. The VIF_CHANGE_IN_PROGRESS flag keeps the intermediate vif
 * count from triggering a single/multi-role firmware switch.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	/* retag the vif, then bring it back up with the new type */
	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2740
/*
 * Issue the fw JOIN for a station or IBSS vif by starting the proper
 * role. Note that JOIN clears the WPA/WPA2 keys in the chipset, so a
 * JOIN while associated is only valid on roaming (new keys follow).
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
2781
2782 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2783 int offset)
2784 {
2785 u8 ssid_len;
2786 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2787 skb->len - offset);
2788
2789 if (!ptr) {
2790 wl1271_error("No SSID in IEs!");
2791 return -ENOENT;
2792 }
2793
2794 ssid_len = ptr[1];
2795 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2796 wl1271_error("SSID is too long!");
2797 return -EINVAL;
2798 }
2799
2800 wlvif->ssid_len = ssid_len;
2801 memcpy(wlvif->ssid, ptr+2, ssid_len);
2802 return 0;
2803 }
2804
2805 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2806 {
2807 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2808 struct sk_buff *skb;
2809 int ieoffset;
2810
2811 /* we currently only support setting the ssid from the ap probe req */
2812 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2813 return -EINVAL;
2814
2815 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2816 if (!skb)
2817 return -EINVAL;
2818
2819 ieoffset = offsetof(struct ieee80211_mgmt,
2820 u.probe_req.variable);
2821 wl1271_ssid_set(wlvif, skb, ieoffset);
2822 dev_kfree_skb(skb);
2823
2824 return 0;
2825 }
2826
2827 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2828 struct ieee80211_bss_conf *bss_conf,
2829 u32 sta_rate_set)
2830 {
2831 int ieoffset;
2832 int ret;
2833
2834 wlvif->aid = bss_conf->aid;
2835 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2836 wlvif->beacon_int = bss_conf->beacon_int;
2837 wlvif->wmm_enabled = bss_conf->qos;
2838
2839 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2840
2841 /*
2842 * with wl1271, we don't need to update the
2843 * beacon_int and dtim_period, because the firmware
2844 * updates it by itself when the first beacon is
2845 * received after a join.
2846 */
2847 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2848 if (ret < 0)
2849 return ret;
2850
2851 /*
2852 * Get a template for hardware connection maintenance
2853 */
2854 dev_kfree_skb(wlvif->probereq);
2855 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2856 wlvif,
2857 NULL);
2858 ieoffset = offsetof(struct ieee80211_mgmt,
2859 u.probe_req.variable);
2860 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2861
2862 /* enable the connection monitoring feature */
2863 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2864 if (ret < 0)
2865 return ret;
2866
2867 /*
2868 * The join command disable the keep-alive mode, shut down its process,
2869 * and also clear the template config, so we need to reset it all after
2870 * the join. The acx_aid starts the keep-alive process, and the order
2871 * of the commands below is relevant.
2872 */
2873 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2874 if (ret < 0)
2875 return ret;
2876
2877 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2878 if (ret < 0)
2879 return ret;
2880
2881 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2882 if (ret < 0)
2883 return ret;
2884
2885 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2886 wlvif->sta.klv_template_id,
2887 ACX_KEEP_ALIVE_TPL_VALID);
2888 if (ret < 0)
2889 return ret;
2890
2891 /*
2892 * The default fw psm configuration is AUTO, while mac80211 default
2893 * setting is off (ACTIVE), so sync the fw with the correct value.
2894 */
2895 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2896 if (ret < 0)
2897 return ret;
2898
2899 if (sta_rate_set) {
2900 wlvif->rate_set =
2901 wl1271_tx_enabled_rates_get(wl,
2902 sta_rate_set,
2903 wlvif->band);
2904 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2905 if (ret < 0)
2906 return ret;
2907 }
2908
2909 return ret;
2910 }
2911
/*
 * Undo wlcore_set_assoc(): drop the probe-request template, disable
 * connection monitoring, keep-alive and beacon filtering, abort any
 * channel switch and invalidate the keep-alive template.
 *
 * Returns 0 after deinit, or false (0) early when there was nothing to
 * unset for this vif.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	/*
	 * NOTE(review): the polarity here is the opposite of the STA check
	 * above (no '!'): an IBSS vif that *was* joined clears the flag and
	 * bails out before deinit. Looks suspicious — confirm intent before
	 * changing.
	 */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* disable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* a channel switch was pending; report it as failed */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2966
2967 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2968 {
2969 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2970 wlvif->rate_set = wlvif->basic_rate_set;
2971 }
2972
2973 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2974 bool idle)
2975 {
2976 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2977
2978 if (idle == cur_idle)
2979 return;
2980
2981 if (idle) {
2982 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2983 } else {
2984 /* The current firmware only supports sched_scan in idle */
2985 if (wl->sched_vif == wlvif)
2986 wl->ops->sched_scan_stop(wl, wlvif);
2987
2988 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2989 }
2990 }
2991
2992 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2993 struct ieee80211_conf *conf, u32 changed)
2994 {
2995 int ret;
2996
2997 if (conf->power_level != wlvif->power_level) {
2998 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2999 if (ret < 0)
3000 return ret;
3001
3002 wlvif->power_level = conf->power_level;
3003 }
3004
3005 return 0;
3006 }
3007
/*
 * mac80211 config callback: cache the global power level and fan the
 * change out to every vif (wlcore treats power per-interface).
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* cache globally even when OFF, so it is applied on next boot */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3049
/*
 * Multicast filter snapshot, built in prepare_multicast() (atomic
 * context) and consumed/freed in configure_filter().
 */
struct wl1271_filter_params {
	/* false when the list overflowed and filtering must stay off */
	bool enabled;
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3055
3056 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3057 struct netdev_hw_addr_list *mc_list)
3058 {
3059 struct wl1271_filter_params *fp;
3060 struct netdev_hw_addr *ha;
3061
3062 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3063 if (!fp) {
3064 wl1271_error("Out of memory setting filters.");
3065 return 0;
3066 }
3067
3068 /* update multicast filtering parameters */
3069 fp->mc_list_length = 0;
3070 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3071 fp->enabled = false;
3072 } else {
3073 fp->enabled = true;
3074 netdev_hw_addr_list_for_each(ha, mc_list) {
3075 memcpy(fp->mc_list[fp->mc_list_length],
3076 ha->addr, ETH_ALEN);
3077 fp->mc_list_length++;
3078 }
3079 }
3080
3081 return (u64)(unsigned long)fp;
3082 }
3083
/* rx filter flags wlcore can honor; configure_filter() masks out the rest */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3090
/*
 * mac80211 configure_filter callback: program the fw group-address
 * (multicast) table for every non-AP vif. @multicast is the cookie from
 * wl1271_op_prepare_multicast() and is freed here on every exit path.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	/* report back to mac80211 what we actually support */
	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			/* ALLMULTI overrides the snapshot: disable the table */
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3145
3146 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3147 u8 id, u8 key_type, u8 key_size,
3148 const u8 *key, u8 hlid, u32 tx_seq_32,
3149 u16 tx_seq_16)
3150 {
3151 struct wl1271_ap_key *ap_key;
3152 int i;
3153
3154 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3155
3156 if (key_size > MAX_KEY_SIZE)
3157 return -EINVAL;
3158
3159 /*
3160 * Find next free entry in ap_keys. Also check we are not replacing
3161 * an existing key.
3162 */
3163 for (i = 0; i < MAX_NUM_KEYS; i++) {
3164 if (wlvif->ap.recorded_keys[i] == NULL)
3165 break;
3166
3167 if (wlvif->ap.recorded_keys[i]->id == id) {
3168 wl1271_warning("trying to record key replacement");
3169 return -EINVAL;
3170 }
3171 }
3172
3173 if (i == MAX_NUM_KEYS)
3174 return -EBUSY;
3175
3176 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3177 if (!ap_key)
3178 return -ENOMEM;
3179
3180 ap_key->id = id;
3181 ap_key->key_type = key_type;
3182 ap_key->key_size = key_size;
3183 memcpy(ap_key->key, key, key_size);
3184 ap_key->hlid = hlid;
3185 ap_key->tx_seq_32 = tx_seq_32;
3186 ap_key->tx_seq_16 = tx_seq_16;
3187
3188 wlvif->ap.recorded_keys[i] = ap_key;
3189 return 0;
3190 }
3191
3192 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3193 {
3194 int i;
3195
3196 for (i = 0; i < MAX_NUM_KEYS; i++) {
3197 kfree(wlvif->ap.recorded_keys[i]);
3198 wlvif->ap.recorded_keys[i] = NULL;
3199 }
3200 }
3201
/*
 * Replay the keys recorded before the AP role started into the fw, set
 * the default WEP key if any WEP key was uploaded, then free the
 * recorded list (on success and on failure alike).
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		/* recorded keys are packed from slot 0; first hole ends it */
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a peer go to the broadcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3241
/*
 * Program (or remove) a key in the fw for an AP or STA vif.
 *
 * AP: before the AP role starts, keys are only recorded and replayed
 * later by wl1271_ap_init_hwenc(); afterwards they go straight to the
 * fw. STA: per-station and broadcast keys are set via the sta-key
 * command; some remove requests are silently ignored (see comments).
 * Returns 0 or a negative error code.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			/* group key: target the broadcast link */
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3318
/*
 * mac80211 set_key callback wrapper: GEM/TKIP keys change the fw spare
 * block accounting, so for those the tx queues are stopped and flushed
 * around the hw-specific set_key, keeping queued packets in sync with
 * the fw. Returns the set-key result, or -EAGAIN when the core is not
 * ON.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	/* always restart the queues we stopped, even on failure */
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3362
3363 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3364 struct ieee80211_vif *vif,
3365 struct ieee80211_sta *sta,
3366 struct ieee80211_key_conf *key_conf)
3367 {
3368 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3369 int ret;
3370 u32 tx_seq_32 = 0;
3371 u16 tx_seq_16 = 0;
3372 u8 key_type;
3373 u8 hlid;
3374
3375 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3376
3377 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3378 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3379 key_conf->cipher, key_conf->keyidx,
3380 key_conf->keylen, key_conf->flags);
3381 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3382
3383 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3384 if (sta) {
3385 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3386 hlid = wl_sta->hlid;
3387 } else {
3388 hlid = wlvif->ap.bcast_hlid;
3389 }
3390 else
3391 hlid = wlvif->sta.hlid;
3392
3393 if (hlid != WL12XX_INVALID_LINK_ID) {
3394 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3395 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3396 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3397 }
3398
3399 switch (key_conf->cipher) {
3400 case WLAN_CIPHER_SUITE_WEP40:
3401 case WLAN_CIPHER_SUITE_WEP104:
3402 key_type = KEY_WEP;
3403
3404 key_conf->hw_key_idx = key_conf->keyidx;
3405 break;
3406 case WLAN_CIPHER_SUITE_TKIP:
3407 key_type = KEY_TKIP;
3408 key_conf->hw_key_idx = key_conf->keyidx;
3409 break;
3410 case WLAN_CIPHER_SUITE_CCMP:
3411 key_type = KEY_AES;
3412 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3413 break;
3414 case WL1271_CIPHER_SUITE_GEM:
3415 key_type = KEY_GEM;
3416 break;
3417 default:
3418 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3419
3420 return -EOPNOTSUPP;
3421 }
3422
3423 switch (cmd) {
3424 case SET_KEY:
3425 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3426 key_conf->keyidx, key_type,
3427 key_conf->keylen, key_conf->key,
3428 tx_seq_32, tx_seq_16, sta);
3429 if (ret < 0) {
3430 wl1271_error("Could not add or replace key");
3431 return ret;
3432 }
3433
3434 /*
3435 * reconfiguring arp response if the unicast (or common)
3436 * encryption key type was changed
3437 */
3438 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3439 (sta || key_type == KEY_WEP) &&
3440 wlvif->encryption_type != key_type) {
3441 wlvif->encryption_type = key_type;
3442 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3443 if (ret < 0) {
3444 wl1271_warning("build arp rsp failed: %d", ret);
3445 return ret;
3446 }
3447 }
3448 break;
3449
3450 case DISABLE_KEY:
3451 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3452 key_conf->keyidx, key_type,
3453 key_conf->keylen, key_conf->key,
3454 0, 0, sta);
3455 if (ret < 0) {
3456 wl1271_error("Could not remove key");
3457 return ret;
3458 }
3459 break;
3460
3461 default:
3462 wl1271_error("Unsupported key cmd 0x%x", cmd);
3463 return -EOPNOTSUPP;
3464 }
3465
3466 return ret;
3467 }
3468 EXPORT_SYMBOL_GPL(wlcore_set_key);
3469
/*
 * mac80211 set_default_unicast_key op.
 *
 * Records the new default key index on the vif and, when the current
 * encryption type is WEP, pushes the default WEP key selection to the
 * FW for the STA link.  Unsetting the default key (key_idx == -1) is
 * not handled and is silently ignored.
 */
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  int key_idx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
		     key_idx);

	/* we don't handle unsetting of default key */
	if (key_idx == -1)
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/* remembered even for non-WEP; used by later key operations */
	wlvif->default_key = key_idx;

	/* the default WEP key needs to be configured at least once */
	if (wlvif->encryption_type == KEY_WEP) {
		ret = wl12xx_cmd_set_default_wep_key(wl,
						     key_idx,
						     wlvif->sta.hlid);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out_unlock:
	mutex_unlock(&wl->mutex);
}
3513
/*
 * Push the current regulatory-domain configuration to the FW, for chips
 * that need it (WLCORE_QUIRK_REGDOMAIN_CONF); a no-op otherwise.
 *
 * On FW command failure a recovery is queued and the function bails out
 * without calling wl1271_ps_elp_sleep() — presumably fine because the
 * recovery path resets the FW/ELP state anyway (NOTE(review): confirm).
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3540
3541 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3542 struct ieee80211_vif *vif,
3543 struct cfg80211_scan_request *req)
3544 {
3545 struct wl1271 *wl = hw->priv;
3546 int ret;
3547 u8 *ssid = NULL;
3548 size_t len = 0;
3549
3550 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3551
3552 if (req->n_ssids) {
3553 ssid = req->ssids[0].ssid;
3554 len = req->ssids[0].ssid_len;
3555 }
3556
3557 mutex_lock(&wl->mutex);
3558
3559 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3560 /*
3561 * We cannot return -EBUSY here because cfg80211 will expect
3562 * a call to ieee80211_scan_completed if we do - in this case
3563 * there won't be any call.
3564 */
3565 ret = -EAGAIN;
3566 goto out;
3567 }
3568
3569 ret = wl1271_ps_elp_wakeup(wl);
3570 if (ret < 0)
3571 goto out;
3572
3573 /* fail if there is any role in ROC */
3574 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3575 /* don't allow scanning right now */
3576 ret = -EBUSY;
3577 goto out_sleep;
3578 }
3579
3580 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3581 out_sleep:
3582 wl1271_ps_elp_sleep(wl);
3583 out:
3584 mutex_unlock(&wl->mutex);
3585
3586 return ret;
3587 }
3588
/*
 * mac80211 cancel_hw_scan op: abort an in-progress HW scan.
 *
 * Stops the FW scan (unless it already reached DONE), resets the driver
 * scan state, and reports completion to mac80211 with aborted=true.
 * The scan_complete_work is cancelled outside the mutex since that work
 * item takes wl->mutex itself.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	/* reset all driver-side scan bookkeeping */
	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	/* must run unlocked: the work item acquires wl->mutex */
	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3635
/*
 * mac80211 sched_scan_start op: start a FW-driven scheduled (periodic)
 * scan on the given vif and remember that vif as wl->sched_vif.
 *
 * Returns -EAGAIN when the core is down, or the chip-specific
 * sched_scan_start result.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_sched_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* record the owner so the scan can be stopped on BSSID change */
	wl->sched_vif = wlvif;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
3670
3671 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3672 struct ieee80211_vif *vif)
3673 {
3674 struct wl1271 *wl = hw->priv;
3675 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3676 int ret;
3677
3678 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3679
3680 mutex_lock(&wl->mutex);
3681
3682 if (unlikely(wl->state != WLCORE_STATE_ON))
3683 goto out;
3684
3685 ret = wl1271_ps_elp_wakeup(wl);
3686 if (ret < 0)
3687 goto out;
3688
3689 wl->ops->sched_scan_stop(wl, wlvif);
3690
3691 wl1271_ps_elp_sleep(wl);
3692 out:
3693 mutex_unlock(&wl->mutex);
3694 }
3695
3696 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3697 {
3698 struct wl1271 *wl = hw->priv;
3699 int ret = 0;
3700
3701 mutex_lock(&wl->mutex);
3702
3703 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3704 ret = -EAGAIN;
3705 goto out;
3706 }
3707
3708 ret = wl1271_ps_elp_wakeup(wl);
3709 if (ret < 0)
3710 goto out;
3711
3712 ret = wl1271_acx_frag_threshold(wl, value);
3713 if (ret < 0)
3714 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3715
3716 wl1271_ps_elp_sleep(wl);
3717
3718 out:
3719 mutex_unlock(&wl->mutex);
3720
3721 return ret;
3722 }
3723
3724 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3725 {
3726 struct wl1271 *wl = hw->priv;
3727 struct wl12xx_vif *wlvif;
3728 int ret = 0;
3729
3730 mutex_lock(&wl->mutex);
3731
3732 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3733 ret = -EAGAIN;
3734 goto out;
3735 }
3736
3737 ret = wl1271_ps_elp_wakeup(wl);
3738 if (ret < 0)
3739 goto out;
3740
3741 wl12xx_for_each_wlvif(wl, wlvif) {
3742 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3743 if (ret < 0)
3744 wl1271_warning("set rts threshold failed: %d", ret);
3745 }
3746 wl1271_ps_elp_sleep(wl);
3747
3748 out:
3749 mutex_unlock(&wl->mutex);
3750
3751 return ret;
3752 }
3753
3754 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3755 {
3756 int len;
3757 const u8 *next, *end = skb->data + skb->len;
3758 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3759 skb->len - ieoffset);
3760 if (!ie)
3761 return;
3762 len = ie[1] + 2;
3763 next = ie + len;
3764 memmove(ie, next, end - next);
3765 skb_trim(skb, skb->len - len);
3766 }
3767
3768 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3769 unsigned int oui, u8 oui_type,
3770 int ieoffset)
3771 {
3772 int len;
3773 const u8 *next, *end = skb->data + skb->len;
3774 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3775 skb->data + ieoffset,
3776 skb->len - ieoffset);
3777 if (!ie)
3778 return;
3779 len = ie[1] + 2;
3780 next = ie + len;
3781 memmove(ie, next, end - next);
3782 skb_trim(skb, skb->len - len);
3783 }
3784
/*
 * Install the mac80211-provided probe response as the AP probe-response
 * template in the FW, and mark the vif so later beacon changes don't
 * overwrite a usermode-supplied probe response.
 *
 * Returns -EOPNOTSUPP when mac80211 has no probe response for this vif,
 * otherwise the template-set command result.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data,
				      skb->len, 0,
				      rates);
	/* template_set copies the data; the skb is ours to free */
	dev_kfree_skb(skb);

	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_AP, "probe response updated");
	/* tells wlcore_set_beacon_template() not to derive one itself */
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);

out:
	return ret;
}
3812
/*
 * Install an AP probe-response template built from beacon-derived data,
 * splicing in the SSID from bss_conf when the vif's own SSID is unset
 * (e.g. hidden SSID in the beacon).
 *
 * When wlvif->ssid_len > 0 the data is assumed to already carry the
 * right SSID and is installed verbatim.  Otherwise the SSID IE found in
 * @probe_rsp_data is replaced by the bss_conf SSID: head is copied up
 * to the IE, the new SSID IE is written, and the tail after the old IE
 * is appended.
 *
 * Returns -EINVAL if the result would exceed the template size or no
 * SSID IE can be found; otherwise the template-set command result.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the existing (e.g. zero-length/hidden) SSID IE */
	ptr += (ptr[1] + 2);

	/* head: everything before the SSID IE */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* tail: everything after the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3870
/*
 * Apply ERP-related BSS changes: slot time, preamble and CTS protection.
 * Slot-time and CTS failures abort with a warning and an error return;
 * preamble failures are ignored (no return-value check on purpose in
 * the original code — NOTE(review): possibly intentional best-effort).
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
3913
/*
 * Fetch the current beacon from mac80211 and install it as the FW
 * beacon template; then derive and install a probe-response template
 * from the same frame (unless usermode already supplied one).
 *
 * Side effects: updates wlvif->ssid via wl1271_ssid_set() and records
 * whether the beacon carries a WMM vendor IE in wlvif->wmm_enabled.
 * The beacon skb is always freed before returning.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	/* record the SSID found in the beacon on the vif */
	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* non-NULL iff the beacon advertises WMM */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the (modified) beacon frame into a probe response */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
						beacon->data,
						beacon->len,
						min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	dev_kfree_skb(beacon);
	if (ret < 0)
		goto out;

out:
	return ret;
}
3999
/*
 * Handle beacon-related BSS changes: beacon interval, a usermode
 * probe-response template (AP only), and the beacon template itself.
 * Probe-response template failures are not propagated; beacon template
 * failures are logged and returned.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		/* best-effort: failure here is deliberately ignored */
		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4033
/* AP mode changes */
/*
 * Apply BSS changes for an AP-mode vif: basic rates (with dependent
 * template re-init), beacon info, AP start/stop on beacon enable
 * toggles, ERP settings and HT operation mode.  Errors short-circuit
 * remaining steps but are not reported to the caller (void return).
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		/* rate change invalidates all templates; rebuild them */
		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4127
/*
 * Configure the STA vif for a newly set BSSID: adopt the AP's beacon
 * interval and rate sets, stop any scheduled scan owned by this vif
 * (sched_scan is only supported while disconnected), push rate
 * policies, rebuild null-data templates, set the SSID, and mark the
 * vif in use.  Returns 0 or a negative FW-command error.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* only overwrite the rate set when the AP advertised one */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4178
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, stop
 * the STA role if it was in use, and clear the IN_USE flag.  Returns 0
 * or a negative FW-command error.
 */
static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* revert back to minimum rates for the current band */
	wl1271_set_band_rate(wl, wlvif);
	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
	return 0;
}
/* STA/IBSS mode changes */
/*
 * Apply BSS changes for a STA or IBSS vif.  Ordering matters: BSSID
 * and rate setup happen before the join, association and HT peer
 * configuration after it, and ARP filtering last.  Beacon filtering is
 * enabled only once BSS_CHANGED_BEACON_INFO reports a valid dtim_period
 * (i.e. after a beacon has actually been received).  Errors abort the
 * remaining steps but are not reported to the caller (void return).
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	/* IBSS vifs beacon themselves; reuse the AP beacon handling */
	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	/* connection-quality monitoring (RSSI trigger) */
	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	/* snapshot the AP's rates and HT caps under RCU for use below */
	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	/* dtim_period is only valid once a beacon has been received */
	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
		/* enable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* re-apply authorization granted before assoc */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4449
/*
 * mac80211 bss_info_changed op: common pre/post work, then dispatch to
 * the AP or STA/IBSS handler.
 *
 * Work that must happen outside wl->mutex is done first: cancelling a
 * pending connection-loss disconnect when association state changes,
 * and flushing TX before an AP's beaconing is disabled (wl1271_tx_flush
 * takes its own locking).  TX power changes are handled here for both
 * modes.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4506
4507 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4508 struct ieee80211_chanctx_conf *ctx)
4509 {
4510 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4511 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4512 cfg80211_get_chandef_type(&ctx->def));
4513 return 0;
4514 }
4515
4516 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4517 struct ieee80211_chanctx_conf *ctx)
4518 {
4519 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4520 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4521 cfg80211_get_chandef_type(&ctx->def));
4522 }
4523
4524 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4525 struct ieee80211_chanctx_conf *ctx,
4526 u32 changed)
4527 {
4528 wl1271_debug(DEBUG_MAC80211,
4529 "mac80211 change chanctx %d (type %d) changed 0x%x",
4530 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4531 cfg80211_get_chandef_type(&ctx->def), changed);
4532 }
4533
4534 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4535 struct ieee80211_vif *vif,
4536 struct ieee80211_chanctx_conf *ctx)
4537 {
4538 struct wl1271 *wl = hw->priv;
4539 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4540 int channel = ieee80211_frequency_to_channel(
4541 ctx->def.chan->center_freq);
4542
4543 wl1271_debug(DEBUG_MAC80211,
4544 "mac80211 assign chanctx (role %d) %d (type %d)",
4545 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4546
4547 mutex_lock(&wl->mutex);
4548
4549 wlvif->band = ctx->def.chan->band;
4550 wlvif->channel = channel;
4551 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4552
4553 /* update default rates according to the band */
4554 wl1271_set_band_rate(wl, wlvif);
4555
4556 mutex_unlock(&wl->mutex);
4557
4558 return 0;
4559 }
4560
4561 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4562 struct ieee80211_vif *vif,
4563 struct ieee80211_chanctx_conf *ctx)
4564 {
4565 struct wl1271 *wl = hw->priv;
4566 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4567
4568 wl1271_debug(DEBUG_MAC80211,
4569 "mac80211 unassign chanctx (role %d) %d (type %d)",
4570 wlvif->role_id,
4571 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4572 cfg80211_get_chandef_type(&ctx->def));
4573
4574 wl1271_tx_flush(wl);
4575 }
4576
/*
 * mac80211 conf_tx op: configure EDCA AC parameters and the TID
 * configuration (including U-APSD trigger scheme) for one queue.
 * Silently returns 0 when the vif is not yet initialized — the
 * parameters are applied later during vif init.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4626
/*
 * mac80211 callback: read the current TSF value from the firmware.
 *
 * Returns the TSF on success, or ULLONG_MAX when the chip is off or the
 * ACX query fails.
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;	/* reported on any failure path */
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
4658
4659 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4660 struct survey_info *survey)
4661 {
4662 struct ieee80211_conf *conf = &hw->conf;
4663
4664 if (idx != 0)
4665 return -ENOENT;
4666
4667 survey->channel = conf->chandef.chan;
4668 survey->filled = 0;
4669 return 0;
4670 }
4671
4672 static int wl1271_allocate_sta(struct wl1271 *wl,
4673 struct wl12xx_vif *wlvif,
4674 struct ieee80211_sta *sta)
4675 {
4676 struct wl1271_station *wl_sta;
4677 int ret;
4678
4679
4680 if (wl->active_sta_count >= wl->max_ap_stations) {
4681 wl1271_warning("could not allocate HLID - too much stations");
4682 return -EBUSY;
4683 }
4684
4685 wl_sta = (struct wl1271_station *)sta->drv_priv;
4686 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4687 if (ret < 0) {
4688 wl1271_warning("could not allocate HLID - too many links");
4689 return -EBUSY;
4690 }
4691
4692 /* use the previous security seq, if this is a recovery/resume */
4693 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4694
4695 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4696 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4697 wl->active_sta_count++;
4698 return 0;
4699 }
4700
/*
 * Release the link (HLID) used by a station in AP mode.
 *
 * Saves the link's freed-packet counter into the station's private data
 * so it survives recovery/suspend, clears all per-link bitmaps, frees
 * the HLID and, when the last station is gone, re-arms the TX watchdog.
 * NOTE(review): appears to rely on wl->mutex serialization for
 * wl->links/active_sta_count - confirm at call sites.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct wl1271_station *wl_sta;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	/* nothing to do if this HLID is not in use */
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of ieee80211_sta,
	 * in case of recovery/suspend
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (sta) {
		wl_sta = (void *)sta->drv_priv;
		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

		/*
		 * increment the initial seq number on recovery to account for
		 * transmitted packets that we haven't yet got in the FW status
		 */
		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
			wl_sta->total_freed_pkts +=
					WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
	rcu_read_unlock();

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4744
4745 static int wl12xx_sta_add(struct wl1271 *wl,
4746 struct wl12xx_vif *wlvif,
4747 struct ieee80211_sta *sta)
4748 {
4749 struct wl1271_station *wl_sta;
4750 int ret = 0;
4751 u8 hlid;
4752
4753 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4754
4755 ret = wl1271_allocate_sta(wl, wlvif, sta);
4756 if (ret < 0)
4757 return ret;
4758
4759 wl_sta = (struct wl1271_station *)sta->drv_priv;
4760 hlid = wl_sta->hlid;
4761
4762 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4763 if (ret < 0)
4764 wl1271_free_sta(wl, wlvif, hlid);
4765
4766 return ret;
4767 }
4768
4769 static int wl12xx_sta_remove(struct wl1271 *wl,
4770 struct wl12xx_vif *wlvif,
4771 struct ieee80211_sta *sta)
4772 {
4773 struct wl1271_station *wl_sta;
4774 int ret = 0, id;
4775
4776 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4777
4778 wl_sta = (struct wl1271_station *)sta->drv_priv;
4779 id = wl_sta->hlid;
4780 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4781 return -EINVAL;
4782
4783 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4784 if (ret < 0)
4785 return ret;
4786
4787 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4788 return ret;
4789 }
4790
4791 static void wlcore_roc_if_possible(struct wl1271 *wl,
4792 struct wl12xx_vif *wlvif)
4793 {
4794 if (find_first_bit(wl->roc_map,
4795 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4796 return;
4797
4798 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4799 return;
4800
4801 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4802 }
4803
/*
 * Track stations that are in the middle of connection establishment.
 * While at least one station (or a pending auth reply) is "in
 * connection", a ROC is held on the role; it is released once the
 * counter drops back to zero and no auth reply is pending.
 *
 * when wl_sta is NULL, we treat this call as if coming from a
 * pending auth reply.
 * wl->mutex must be taken and the FW must be awake when the call
 * takes place.
 */
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct wl1271_station *wl_sta, bool in_conn)
{
	if (in_conn) {
		/* a station must not be marked in-connection twice */
		if (WARN_ON(wl_sta && wl_sta->in_connection))
			return;

		/* first in-connection entry on this vif: try to start a ROC */
		if (!wlvif->ap_pending_auth_reply &&
		    !wlvif->inconn_count)
			wlcore_roc_if_possible(wl, wlvif);

		if (wl_sta) {
			wl_sta->in_connection = true;
			wlvif->inconn_count++;
		} else {
			wlvif->ap_pending_auth_reply = true;
		}
	} else {
		/* ignore stations that were never marked in-connection */
		if (wl_sta && !wl_sta->in_connection)
			return;

		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
			return;

		if (WARN_ON(wl_sta && !wlvif->inconn_count))
			return;

		if (wl_sta) {
			wl_sta->in_connection = false;
			wlvif->inconn_count--;
		} else {
			wlvif->ap_pending_auth_reply = false;
		}

		/* last one out: release the ROC if this role holds it */
		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
		    test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}
}
4849
/*
 * Apply a mac80211 station state transition to the firmware.
 *
 * Handles AP-mode peers (add/remove/authorize) and STA-mode association
 * state, including ROC bookkeeping around authentication.  Called from
 * wl12xx_op_sta_state() with wl->mutex held and the FW awake.
 * Returns 0 on success or a negative error code.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* new peer is connecting - keep a ROC while it completes */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* connection is complete - drop the in-connection state */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* De-authorize (STA mode): fall back to associated-only state */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* new STA connection: ROC on our channel if nobody else holds one */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
4935
/*
 * mac80211 callback: station state machine transition.
 *
 * Wakes the chip and delegates to wl12xx_update_sta_state().  Errors on
 * "downward" transitions (new_state < old_state) are deliberately
 * reported as success - teardown transitions must not fail.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/* never fail a downward (teardown) transition */
	if (new_state < old_state)
		return 0;
	return ret;
}
4969
/*
 * mac80211 callback: manage AMPDU (block-ack) sessions.
 *
 * Only RX BA sessions are driven from the host: they are started/stopped
 * via ACX commands and tracked in a per-link bitmap plus a global session
 * counter.  TX BA sessions are handled autonomously by the firmware, so
 * all TX actions are rejected with -EINVAL.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the link (HLID) the session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		/* refuse to open a second session on the same TID */
		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The BA initiator (TX) session is managed by the FW independently,
	 * so reject all TX AMPDU actions here on purpose.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5089
/*
 * mac80211 callback: restrict the set of legacy rates used for TX.
 *
 * The per-band masks are always recorded in the vif; the firmware rate
 * policies are only reprogrammed immediately for a STA vif that is not
 * yet associated.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* remember the mask per band; applied when rates are next selected */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5132
/*
 * mac80211 callback: start a channel switch announced by the AP.
 *
 * Flushes TX, then instructs the firmware to follow the switch for every
 * STA vif.  A delayed work is armed to report failure if the switch has
 * not completed 5s after the announced switch time.  If the chip is
 * already off, the switch is reported as failed immediately.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		/* chip is gone - fail the switch on every STA vif */
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5184
/*
 * mac80211 callback: flush pending TX frames.
 * @queues and @drop are ignored - wl1271_tx_flush() flushes everything.
 */
static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct wl1271 *wl = hw->priv;

	wl1271_tx_flush(wl);
}
5191
/*
 * mac80211 callback: remain on @chan for @duration ms.
 *
 * Only a single ROC may be active device-wide: returns -EBUSY when any
 * role already holds one.  On success the device role is started on the
 * requested channel and roc_complete_work is scheduled to end the ROC
 * after @duration.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the owner; cleared again in __wlcore_roc_completed() */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5237
/*
 * Finish a pending remain-on-channel: stop the device role and clear
 * wl->roc_vif.  Returns 0 when there is nothing to do or on success,
 * -EBUSY when the vif is not initialized, or a negative error from
 * wl12xx_stop_dev().  Called with wl->mutex held and the FW awake.
 */
static int __wlcore_roc_completed(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	int ret;

	/* already completed */
	if (unlikely(!wl->roc_vif))
		return 0;

	wlvif = wl12xx_vif_to_data(wl->roc_vif);

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return -EBUSY;

	ret = wl12xx_stop_dev(wl, wlvif);
	if (ret < 0)
		return ret;

	wl->roc_vif = NULL;

	return 0;
}
5260
/*
 * Locked/awake wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the chip, completes the ROC and puts the chip back to sleep.
 * Returns -EBUSY when the chip is not on.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5286
5287 static void wlcore_roc_complete_work(struct work_struct *work)
5288 {
5289 struct delayed_work *dwork;
5290 struct wl1271 *wl;
5291 int ret;
5292
5293 dwork = container_of(work, struct delayed_work, work);
5294 wl = container_of(dwork, struct wl1271, roc_complete_work);
5295
5296 ret = wlcore_roc_completed(wl);
5297 if (!ret)
5298 ieee80211_remain_on_channel_expired(wl->hw);
5299 }
5300
/*
 * mac80211 callback: cancel an ongoing remain-on-channel.
 * Flushes TX, cancels the pending completion work and finishes the ROC
 * synchronously.  Always returns 0.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5319
5320 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5321 struct ieee80211_vif *vif,
5322 struct ieee80211_sta *sta,
5323 u32 changed)
5324 {
5325 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5326 struct wl1271 *wl = hw->priv;
5327
5328 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5329 }
5330
5331 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5332 struct ieee80211_vif *vif,
5333 struct ieee80211_sta *sta,
5334 s8 *rssi_dbm)
5335 {
5336 struct wl1271 *wl = hw->priv;
5337 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5338 int ret = 0;
5339
5340 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5341
5342 mutex_lock(&wl->mutex);
5343
5344 if (unlikely(wl->state != WLCORE_STATE_ON))
5345 goto out;
5346
5347 ret = wl1271_ps_elp_wakeup(wl);
5348 if (ret < 0)
5349 goto out_sleep;
5350
5351 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5352 if (ret < 0)
5353 goto out_sleep;
5354
5355 out_sleep:
5356 wl1271_ps_elp_sleep(wl);
5357
5358 out:
5359 mutex_unlock(&wl->mutex);
5360
5361 return ret;
5362 }
5363
5364 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5365 {
5366 struct wl1271 *wl = hw->priv;
5367 bool ret = false;
5368
5369 mutex_lock(&wl->mutex);
5370
5371 if (unlikely(wl->state != WLCORE_STATE_ON))
5372 goto out;
5373
5374 /* packets are considered pending if in the TX queue or the FW */
5375 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5376 out:
5377 mutex_unlock(&wl->mutex);
5378
5379 return ret;
5380 }
5381
/* 2.4 GHz bitrates (units of 100 kbps; 1-11 Mbps CCK, 6-54 Mbps OFDM). */
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* 2.4 GHz channels 1-14 */
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};

/* 2.4 GHz band descriptor tying the channel and rate tables together */
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5450
/* 5 GHz data rates for WL1273 (OFDM only; units of 100 kbps) */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};

/* 5 GHz band descriptor tying the channel and rate tables together */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5520
/* mac80211 driver callbacks for wlcore-based chips */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5564
5565
/*
 * Translate a HW rate value (as reported by the chip) into an index in
 * the band's bitrate table via wl->band_rate_to_idx.  Illegal or
 * unsupported rates log an error and map to index 0.
 */
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
{
	u8 idx;

	/* only two band tables exist (2.4 GHz and 5 GHz) */
	BUG_ON(band >= 2);

	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
		wl1271_error("Illegal RX rate from HW: %d", rate);
		return 0;
	}

	idx = wl->band_rate_to_idx[band][rate];
	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
		wl1271_error("Unsupported RX rate from HW: %d", rate);
		return 0;
	}

	return idx;
}
5585
/*
 * Derive WLCORE_NUM_MAC_ADDRESSES sequential MAC addresses from a base
 * OUI/NIC pair and publish them to the wiphy.
 *
 * wl->num_mac_addr addresses are generated by incrementing the NIC part;
 * if the chip supplies one address fewer than needed, the last slot is
 * filled with a copy of the first address with the locally-administered
 * (LAA) bit set.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	/* incrementing the NIC part must not overflow its 24 bits */
	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[2] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
5624
/*
 * Power the chip on briefly to read identification data: the chip id,
 * the PG (production) version and, when the chip supports it, the fused
 * MAC address.  The chip is powered off again before returning.
 */
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
	if (ret < 0)
		goto out;

	/* cleared here; may be filled in by the get_mac op below */
	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
	if (ret < 0)
		goto out;

	if (wl->ops->get_mac)
		ret = wl->ops->get_mac(wl);

out:
	wl1271_power_off(wl);
	return ret;
}
5651
5652 static int wl1271_register_hw(struct wl1271 *wl)
5653 {
5654 int ret;
5655 u32 oui_addr = 0, nic_addr = 0;
5656
5657 if (wl->mac80211_registered)
5658 return 0;
5659
5660 if (wl->nvs_len >= 12) {
5661 /* NOTE: The wl->nvs->nvs element must be first, in
5662 * order to simplify the casting, we assume it is at
5663 * the beginning of the wl->nvs structure.
5664 */
5665 u8 *nvs_ptr = (u8 *)wl->nvs;
5666
5667 oui_addr =
5668 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5669 nic_addr =
5670 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5671 }
5672
5673 /* if the MAC address is zeroed in the NVS derive from fuse */
5674 if (oui_addr == 0 && nic_addr == 0) {
5675 oui_addr = wl->fuse_oui_addr;
5676 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5677 nic_addr = wl->fuse_nic_addr + 1;
5678 }
5679
5680 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5681
5682 ret = ieee80211_register_hw(wl->hw);
5683 if (ret < 0) {
5684 wl1271_error("unable to register mac80211 hw: %d", ret);
5685 goto out;
5686 }
5687
5688 wl->mac80211_registered = true;
5689
5690 wl1271_debugfs_init(wl);
5691
5692 wl1271_notice("loaded");
5693
5694 out:
5695 return ret;
5696 }
5697
/*
 * Unregister from mac80211.  PLT (production-line test) mode, if
 * active, must be stopped first since it holds the hardware outside
 * mac80211's control.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
5707
/*
 * One-time population of the ieee80211_hw / wiphy structures:
 * capabilities, cipher suites, supported interface modes, scan limits,
 * band data and queue layout.  Called before wl1271_register_hw().
 * Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra room for the TKIP header in the skb */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_SUPPORTS_PS |
			IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
			IEEE80211_HW_SUPPORTS_UAPSD |
			IEEE80211_HW_HAS_RATE_CONTROL |
			IEEE80211_HW_CONNECTION_MONITOR |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
			IEEE80211_HW_SPECTRUM_MGMT |
			IEEE80211_HW_AP_LINK_PS |
			IEEE80211_HW_AMPDU_AGGREGATION |
			IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
			IEEE80211_HW_QUEUE_CONTROL;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 5000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	/* per-station / per-vif private areas mac80211 allocates for us */
	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5840
/*
 * Allocate and minimally initialize a wl1271 instance together with
 * its ieee80211_hw: queues, work items, the freezable workqueue, the
 * aggregation buffer, dummy packet, FW-log page and event mailbox.
 *
 * @priv_size:     size of the chip-family private area (wl->priv)
 * @aggr_buf_size: size of the TX/RX aggregation buffer (page-rounded)
 * @mbox_size:     size of the firmware event mailbox (DMA-able)
 *
 * Returns the ieee80211_hw on success or an ERR_PTR on failure.
 * On failure, everything allocated so far is released via the goto
 * chain below (each label undoes exactly one earlier step, in reverse
 * order — keep them matched when editing).
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	/* freezable so work is quiesced across system suspend */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/* NOTE(review): wl->num_tx_desc is presumably still 0 here (set by
	 * ops->setup later), so this loop is a no-op after the memset above */
	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* GFP_DMA: the firmware writes the mailbox via DMA */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5992 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5993
5994 int wlcore_free_hw(struct wl1271 *wl)
5995 {
5996 /* Unblock any fwlog readers */
5997 mutex_lock(&wl->mutex);
5998 wl->fwlog_size = -1;
5999 wake_up_interruptible_all(&wl->fwlog_waitq);
6000 mutex_unlock(&wl->mutex);
6001
6002 wlcore_sysfs_free(wl);
6003
6004 kfree(wl->buffer_32);
6005 kfree(wl->mbox);
6006 free_page((unsigned long)wl->fwlog);
6007 dev_kfree_skb(wl->dummy_packet);
6008 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6009
6010 wl1271_debugfs_exit(wl);
6011
6012 vfree(wl->fw);
6013 wl->fw = NULL;
6014 wl->fw_type = WL12XX_FW_TYPE_NONE;
6015 kfree(wl->nvs);
6016 wl->nvs = NULL;
6017
6018 kfree(wl->raw_fw_status);
6019 kfree(wl->fw_status);
6020 kfree(wl->tx_res_if);
6021 destroy_workqueue(wl->freezable_wq);
6022
6023 kfree(wl->priv);
6024 ieee80211_free_hw(wl->hw);
6025
6026 return 0;
6027 }
6028 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6029
#ifdef CONFIG_PM
/* Wake-on-WLAN capabilities advertised to cfg80211 (pattern filters). */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6038
/*
 * Hard-IRQ half used with edge-triggered interrupt lines
 * (WL12XX_PLATFORM_QUIRK_EDGE_IRQ): do nothing here, just punt to the
 * threaded handler (wlcore_irq).
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6043
6044 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6045 {
6046 struct wl1271 *wl = context;
6047 struct platform_device *pdev = wl->pdev;
6048 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6049 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6050 unsigned long irqflags;
6051 int ret;
6052 irq_handler_t hardirq_fn = NULL;
6053
6054 if (fw) {
6055 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6056 if (!wl->nvs) {
6057 wl1271_error("Could not allocate nvs data");
6058 goto out;
6059 }
6060 wl->nvs_len = fw->size;
6061 } else {
6062 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6063 WL12XX_NVS_NAME);
6064 wl->nvs = NULL;
6065 wl->nvs_len = 0;
6066 }
6067
6068 ret = wl->ops->setup(wl);
6069 if (ret < 0)
6070 goto out_free_nvs;
6071
6072 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6073
6074 /* adjust some runtime configuration parameters */
6075 wlcore_adjust_conf(wl);
6076
6077 wl->irq = platform_get_irq(pdev, 0);
6078 wl->platform_quirks = pdata->platform_quirks;
6079 wl->if_ops = pdev_data->if_ops;
6080
6081 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6082 irqflags = IRQF_TRIGGER_RISING;
6083 hardirq_fn = wlcore_hardirq;
6084 } else {
6085 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6086 }
6087
6088 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6089 irqflags, pdev->name, wl);
6090 if (ret < 0) {
6091 wl1271_error("request_irq() failed: %d", ret);
6092 goto out_free_nvs;
6093 }
6094
6095 #ifdef CONFIG_PM
6096 ret = enable_irq_wake(wl->irq);
6097 if (!ret) {
6098 wl->irq_wake_enabled = true;
6099 device_init_wakeup(wl->dev, 1);
6100 if (pdata->pwr_in_suspend)
6101 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6102 }
6103 #endif
6104 disable_irq(wl->irq);
6105
6106 ret = wl12xx_get_hw_info(wl);
6107 if (ret < 0) {
6108 wl1271_error("couldn't get hw info");
6109 goto out_irq;
6110 }
6111
6112 ret = wl->ops->identify_chip(wl);
6113 if (ret < 0)
6114 goto out_irq;
6115
6116 ret = wl1271_init_ieee80211(wl);
6117 if (ret)
6118 goto out_irq;
6119
6120 ret = wl1271_register_hw(wl);
6121 if (ret)
6122 goto out_irq;
6123
6124 ret = wlcore_sysfs_init(wl);
6125 if (ret)
6126 goto out_unreg;
6127
6128 wl->initialized = true;
6129 goto out;
6130
6131 out_unreg:
6132 wl1271_unregister_hw(wl);
6133
6134 out_irq:
6135 free_irq(wl->irq, wl);
6136
6137 out_free_nvs:
6138 kfree(wl->nvs);
6139
6140 out:
6141 release_firmware(fw);
6142 complete_all(&wl->nvs_loading_complete);
6143 }
6144
6145 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6146 {
6147 int ret;
6148
6149 if (!wl->ops || !wl->ptable)
6150 return -EINVAL;
6151
6152 wl->dev = &pdev->dev;
6153 wl->pdev = pdev;
6154 platform_set_drvdata(pdev, wl);
6155
6156 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6157 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6158 wl, wlcore_nvs_cb);
6159 if (ret < 0) {
6160 wl1271_error("request_firmware_nowait failed: %d", ret);
6161 complete_all(&wl->nvs_loading_complete);
6162 }
6163
6164 return ret;
6165 }
6166 EXPORT_SYMBOL_GPL(wlcore_probe);
6167
/*
 * Platform-driver removal: wait for the async NVS probe to finish,
 * then tear down in reverse of wlcore_nvs_cb() — wake support, the
 * mac80211 registration, the interrupt line, and finally all memory.
 * If probing never completed successfully there is nothing to undo.
 */
int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
6187
/* Runtime-tunable debug bitmask shared with the chip-family modules. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);
This page took 0.165606 seconds and 5 git commands to generate.