wlcore/wl12xx/wl18xx: configure num_links per-hw
[deliverable/linux.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
#define WL1271_BOOT_RETRIES 3

/* Module parameters; -1 / NULL means "not set, keep the per-chip default
 * from wl->conf" (see wlcore_adjust_conf()). */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery     = -1;

/* Forward declarations for helpers defined later in this file. */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
83 int i;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
86
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
91 continue;
92
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
95
96 }
97
98 wlcore_regdomain_config(wl);
99 }
100
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
102 bool enable)
103 {
104 int ret = 0;
105
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
108 if (ret < 0)
109 goto out;
110
111 if (enable)
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 else
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 out:
116 return ret;
117 }
118
119 /*
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
122 */
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
124 {
125 int ret = 0;
126 int period = wl->conf.rx_streaming.interval;
127
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
130 goto out;
131
132 /* reconfigure/disable according to new streaming_period */
133 if (period &&
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
138 else {
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
142 }
143 out:
144 return ret;
145 }
146
/* Worker: enable RX streaming on a vif, then arm the inactivity timer. */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/*
	 * Bail out if streaming is already on, we aren't associated, or
	 * neither "always" mode nor soft-gemini calls for streaming.
	 */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* interval 0 means rx streaming is disabled by configuration */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
182
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
184 {
185 int ret;
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
189
190 mutex_lock(&wl->mutex);
191
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
193 goto out;
194
195 ret = wl1271_ps_elp_wakeup(wl);
196 if (ret < 0)
197 goto out;
198
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
200 if (ret)
201 goto out_sleep;
202
203 out_sleep:
204 wl1271_ps_elp_sleep(wl);
205 out:
206 mutex_unlock(&wl->mutex);
207 }
208
209 static void wl1271_rx_streaming_timer(unsigned long data)
210 {
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
221 return;
222
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227
/*
 * Delayed worker: fires when no Tx completion arrived from the FW for
 * tx_watchdog_timeout ms while blocks are still allocated.  Benign
 * causes (ROC, scan, AP stations possibly in PS) just re-arm the
 * watchdog; otherwise Tx is considered stuck and recovery is queued.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
289
290 static void wlcore_adjust_conf(struct wl1271 *wl)
291 {
292 /* Adjust settings according to optional module parameters */
293
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
299 } else {
300 wl1271_error(
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
303 }
304 }
305
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 }
320 }
321
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
328
/*
 * Adjust host-side power-save handling for one AP->STA link based on
 * the FW PS bitmap and how many of its packets are pending in the FW.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 3 active links, since we must
	 * account for the global and broadcast AP links. The "fw_ps" check
	 * assures us the third link is a STA connected to the AP. Otherwise
	 * the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > 3 && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
357
/*
 * AP mode: refresh the cached FW link-PS bitmap and re-evaluate the PS
 * state of every connected station link on this vif.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	u32 cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = status->link_ps_bitmap;
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	/* wl->num_links is configured per-hw (differs by chip family) */
	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    wl->links[hlid].allocated_pkts);
}
379
/*
 * Read and parse the firmware status area: update freed Tx
 * packet/block counters (global, per-queue and per-link), re-arm or
 * cancel the Tx watchdog, refresh AP link PS state and the
 * host-chipset time offset.  Called with wl->mutex held.
 * Returns 0 or a negative error from the bus read.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* chip-specific conversion of the raw area into wl->fw_status */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)(status->fw_localtime);

	/* cache the fast-link bitmap reported by the firmware */
	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
488
489 static void wl1271_flush_deferred_work(struct wl1271 *wl)
490 {
491 struct sk_buff *skb;
492
493 /* Pass all received frames to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
495 ieee80211_rx_ni(wl->hw, skb);
496
497 /* Return sent skbs to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
499 ieee80211_tx_status_ni(wl->hw, skb);
500 }
501
502 static void wl1271_netstack_work(struct work_struct *work)
503 {
504 struct wl1271 *wl =
505 container_of(work, struct wl1271, netstack_work);
506
507 do {
508 wl1271_flush_deferred_work(wl);
509 } while (skb_queue_len(&wl->deferred_rx_queue));
510 }
511
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt servicing loop, called with wl->mutex held: read the
 * FW status, complete Tx, hand RX to mac80211, dispatch mailbox events,
 * and iterate until no interrupt bits remain (a single pass for
 * edge-triggered IRQ platforms).  A negative return triggers recovery
 * in the caller.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
642
/*
 * Threaded IRQ handler: complete a pending ELP wakeup, defer servicing
 * while suspended (marking the work pending and disabling the IRQ),
 * otherwise service the chip under wl->mutex and queue Tx or recovery
 * work as needed.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
690
/*
 * Accumulator for wl12xx_get_vif_count(): counts active interfaces and
 * records whether cur_vif is among them.
 */
struct vif_counter_data {
	u8 counter;		/* number of active vifs seen so far */

	struct ieee80211_vif *cur_vif;	/* vif we are looking for */
	bool cur_vif_running;		/* set if cur_vif was iterated */
};
697
698 static void wl12xx_vif_count_iter(void *data, u8 *mac,
699 struct ieee80211_vif *vif)
700 {
701 struct vif_counter_data *counter = data;
702
703 counter->counter++;
704 if (counter->cur_vif == vif)
705 counter->cur_vif_running = true;
706 }
707
708 /* caller must not hold wl->mutex, as it might deadlock */
709 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
710 struct ieee80211_vif *cur_vif,
711 struct vif_counter_data *data)
712 {
713 memset(data, 0, sizeof(*data));
714 data->cur_vif = cur_vif;
715
716 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
717 wl12xx_vif_count_iter, data);
718 }
719
/*
 * Select the firmware image (PLT / multi-role / single-role) and load
 * it into wl->fw via request_firmware().  No-op if an image of the
 * right type is already cached.  Returns 0 or a negative error code.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the right image is already loaded */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop any previously cached image before copying the new one */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
782
/*
 * Kick off firmware recovery: move to RESTARTING, wake the chip, mask
 * further interrupts and queue the recovery worker.  Only acts while
 * the state is ON, which also prevents a recursive recovery.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		wl1271_ps_elp_wakeup(wl);
		wlcore_disable_interrupts_nosync(wl);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
796
797 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
798 {
799 size_t len;
800
801 /* Make sure we have enough room */
802 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
803
804 /* Fill the FW log file, consumed by the sysfs fwlog entry */
805 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
806 wl->fwlog_size += len;
807
808 return len;
809 }
810
/*
 * After a FW panic, walk the firmware's linked list of log memory
 * blocks and copy their payload into the host fwlog buffer (exposed
 * through sysfs).  Best-effort: returns silently on any failure and
 * always restores the previous memory partition.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status);
	if (ret < 0)
		goto out;

	addr = wl->fw_status->log_start_addr;
	if (!addr)
		goto out;

	/* payload offset skips the next-block pointer (plus an RX
	 * descriptor in continuous mode); pick the end marker per mode */
	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	/* wake readers blocked on the sysfs fwlog entry */
	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	wlcore_set_partition(wl, &old_part);
}
899
900 static void wlcore_print_recovery(struct wl1271 *wl)
901 {
902 u32 pc = 0;
903 u32 hint_sts = 0;
904 int ret;
905
906 wl1271_info("Hardware recovery in progress. FW ver: %s",
907 wl->chip.fw_ver_str);
908
909 /* change partitions momentarily so we can read the FW pc */
910 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
911 if (ret < 0)
912 return;
913
914 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
915 if (ret < 0)
916 return;
917
918 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
919 if (ret < 0)
920 return;
921
922 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
923 pc, hint_sts, ++wl->recovery_count);
924
925 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
926 }
927
928
/*
 * Recovery worker: optionally dump the FW panic log and diagnostics,
 * tear down all interfaces, stop the chip and ask mac80211 to restart
 * the hardware.  Honors the no_recovery / bug_on_recovery knobs.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	/* an unintended recovery means real FW trouble: capture state */
	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
981
/* Write the ELP control register to wake the firmware out of low power. */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
986
987 static int wl1271_setup(struct wl1271 *wl)
988 {
989 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
990 if (!wl->raw_fw_status)
991 goto err;
992
993 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
994 if (!wl->fw_status)
995 goto err;
996
997 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
998 if (!wl->tx_res_if)
999 goto err;
1000
1001 return 0;
1002 err:
1003 kfree(wl->fw_status);
1004 kfree(wl->raw_fw_status);
1005 return -ENOMEM;
1006 }
1007
/*
 * Power-on sequence: toggle power with the required settle delays,
 * reset and init the IO layer, map the BOOT partition and wake the
 * chip out of ELP.  Powers the chip back off on failure after the
 * power-on step.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1036
/*
 * Power the chip on, configure the bus block size, allocate the FW
 * communication buffers and fetch the appropriate firmware image
 * (PLT image when plt is true).
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1072
/*
 * Boot the chip into the requested PLT mode, retrying the boot up to
 * WL1271_BOOT_RETRIES times.  In PLT_CHIP_AWAKE mode the chip is only
 * powered up, without running plt_init.  Fails with -EBUSY unless the
 * driver is in the OFF state.
 */
int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
{
	int retries = WL1271_BOOT_RETRIES;
	struct wiphy *wiphy = wl->hw->wiphy;

	/* indexed by enum plt_mode, for the boot notice below */
	static const char* const PLT_MODE[] = {
		"PLT_OFF",
		"PLT_ON",
		"PLT_FEM_DETECT",
		"PLT_CHIP_AWAKE"
	};

	int ret;

	mutex_lock(&wl->mutex);

	wl1271_notice("power up");

	if (wl->state != WLCORE_STATE_OFF) {
		wl1271_error("cannot go into PLT state because not "
			     "in off state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	/* Indicate to lower levels that we are now in PLT mode */
	wl->plt = true;
	wl->plt_mode = plt_mode;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, true);
		if (ret < 0)
			goto power_off;

		if (plt_mode != PLT_CHIP_AWAKE) {
			ret = wl->ops->plt_init(wl);
			if (ret < 0)
				goto power_off;
		}

		wl->state = WLCORE_STATE_ON;
		wl1271_notice("firmware booted in PLT mode %s (%s)",
			      PLT_MODE[plt_mode],
			      wl->chip.fw_ver_str);

		/* update hw/fw version info in wiphy struct */
		wiphy->hw_version = wl->chip.id;
		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
			sizeof(wiphy->fw_version));

		goto out;

power_off:
		wl1271_power_off(wl);
	}

	/* all retries failed: leave PLT mode and report the last error */
	wl->plt = false;
	wl->plt_mode = PLT_OFF;

	wl1271_error("firmware boot in PLT mode failed despite %d retries",
		     WL1271_BOOT_RETRIES);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
1140
/*
 * Power down and leave PLT mode: interrupts are masked before the
 * state changes so the IRQ handler cannot race with the teardown, and
 * all pending work is flushed before the chip is powered off.
 * Returns -EBUSY when not in PLT mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* flush/cancel everything that may still touch the chip */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1191
/*
 * mac80211 .tx callback: enqueue an skb on the per-link/per-AC queue and
 * kick the tx work.  Frames are dropped (freed back to mac80211) when no
 * vif is attached, the destination link is invalid, or the queue is
 * hard-stopped.  Runs in atomic context under wl->wl_lock.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	/* map the mac80211 queue to a firmware AC queue */
	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* counters are kept both globally and per-vif */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1264
/*
 * Queue the preallocated dummy packet (wl->dummy_packet) for transmission.
 * The firmware requests this when it is low on RX memory blocks.  A no-op
 * if a dummy packet is already pending.  Returns 0 or a negative error
 * from wlcore_tx_work_locked().
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	/* mark the dummy packet pending and account for it on its queue */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1291
1292 /*
1293 * The size of the dummy packet should be at least 1400 bytes. However, in
1294 * order to minimize the number of bus transactions, aligning it to 512 bytes
1295 * boundaries could be beneficial, performance wise
1296 */
1297 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1298
1299 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1300 {
1301 struct sk_buff *skb;
1302 struct ieee80211_hdr_3addr *hdr;
1303 unsigned int dummy_packet_size;
1304
1305 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1306 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1307
1308 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1309 if (!skb) {
1310 wl1271_warning("Failed to allocate a dummy packet skb");
1311 return NULL;
1312 }
1313
1314 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1315
1316 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1317 memset(hdr, 0, sizeof(*hdr));
1318 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1319 IEEE80211_STYPE_NULLFUNC |
1320 IEEE80211_FCTL_TODS);
1321
1322 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1323
1324 /* Dummy packets require the TID to be management */
1325 skb->priority = WL1271_TID_MGMT;
1326
1327 /* Initialize all fields that might be used */
1328 skb_set_queue_mapping(skb, 0);
1329 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1330
1331 return skb;
1332 }
1333
1334
1335 #ifdef CONFIG_PM
/*
 * Validate that a WoWLAN packet pattern can be expressed as FW RX-filter
 * fields: no more than WL1271_RX_FILTER_MAX_FIELDS segments, and the
 * serialized fields buffer within WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 * Returns 0 if representable, -EINVAL or -E2BIG otherwise.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		/* each set mask bit means byte i of the pattern is matched */
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new contiguous segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* segment crosses the eth/IP header
					 * boundary: close it and open a new
					 * field */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				/* gap in the mask terminates the segment */
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* close a segment that ran to the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1397
1398 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1399 {
1400 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1401 }
1402
1403 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1404 {
1405 int i;
1406
1407 if (filter == NULL)
1408 return;
1409
1410 for (i = 0; i < filter->num_fields; i++)
1411 kfree(filter->fields[i].pattern);
1412
1413 kfree(filter);
1414 }
1415
1416 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1417 u16 offset, u8 flags,
1418 u8 *pattern, u8 len)
1419 {
1420 struct wl12xx_rx_filter_field *field;
1421
1422 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1423 wl1271_warning("Max fields per RX filter. can't alloc another");
1424 return -EINVAL;
1425 }
1426
1427 field = &filter->fields[filter->num_fields];
1428
1429 field->pattern = kzalloc(len, GFP_KERNEL);
1430 if (!field->pattern) {
1431 wl1271_warning("Failed to allocate RX filter pattern");
1432 return -ENOMEM;
1433 }
1434
1435 filter->num_fields++;
1436
1437 field->offset = cpu_to_le16(offset);
1438 field->flags = flags;
1439 field->len = len;
1440 memcpy(field->pattern, pattern, len);
1441
1442 return 0;
1443 }
1444
1445 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1446 {
1447 int i, fields_size = 0;
1448
1449 for (i = 0; i < filter->num_fields; i++)
1450 fields_size += filter->fields[i].len +
1451 sizeof(struct wl12xx_rx_filter_field) -
1452 sizeof(u8 *);
1453
1454 return fields_size;
1455 }
1456
1457 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1458 u8 *buf)
1459 {
1460 int i;
1461 struct wl12xx_rx_filter_field *field;
1462
1463 for (i = 0; i < filter->num_fields; i++) {
1464 field = (struct wl12xx_rx_filter_field *)buf;
1465
1466 field->offset = filter->fields[i].offset;
1467 field->flags = filter->fields[i].flags;
1468 field->len = filter->fields[i].len;
1469
1470 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1471 buf += sizeof(struct wl12xx_rx_filter_field) -
1472 sizeof(u8 *) + field->len;
1473 }
1474 }
1475
/*
 * Convert a cfg80211 WoWLAN packet pattern into a FW RX filter.
 *
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free().
 * Contiguous masked runs become filter fields; a run that crosses the
 * Ethernet/IP header boundary is split into two fields, since the FW
 * addresses the two headers with different offset bases.
 * Returns 0 and *f on success; on error *f is NULL.
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip unmasked (don't-care) bytes */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/*
		 * extend j to the end of the contiguous masked run, but
		 * stop at the Ethernet/IP header boundary
		 */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* the FW offset is relative to the selected header */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1543
/*
 * Program the FW RX filters from a cfg80211 WoWLAN configuration.
 *
 * With no patterns (or wow->any), all filters are cleared and the
 * default action is FILTER_SIGNAL (wake on everything).  Otherwise each
 * pattern is translated into a filter, and the default action becomes
 * FILTER_DROP so only matching frames wake the host.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		/* no pattern filtering requested: wake on any frame */
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the FW keeps its own copy; ours can go */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop anything that didn't match a filter */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1608
/*
 * Prepare a STA vif for suspend: program the WoWLAN filters and switch
 * the FW to the suspend wake-up conditions.  A no-op for unassociated
 * STAs.  Returns 0 or a negative error.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out_sleep;

	/* skip the ACX if the suspend conditions match the normal ones */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out_sleep;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	return ret;

}
1645
1646 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1647 struct wl12xx_vif *wlvif)
1648 {
1649 int ret = 0;
1650
1651 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1652 goto out;
1653
1654 ret = wl1271_ps_elp_wakeup(wl);
1655 if (ret < 0)
1656 goto out;
1657
1658 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1659
1660 wl1271_ps_elp_sleep(wl);
1661 out:
1662 return ret;
1663
1664 }
1665
1666 static int wl1271_configure_suspend(struct wl1271 *wl,
1667 struct wl12xx_vif *wlvif,
1668 struct cfg80211_wowlan *wow)
1669 {
1670 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1671 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1672 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1673 return wl1271_configure_suspend_ap(wl, wlvif);
1674 return 0;
1675 }
1676
/*
 * Undo the suspend-time configuration on resume: for STA, tear down the
 * WoWLAN filters and restore normal wake-up conditions; for AP, disable
 * beacon filtering again.  Errors are logged but not propagated.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		/* remove WoWLAN filtering; NULL means "clear all" */
		wl1271_configure_wowlan(wl, NULL);

		/* nothing to restore if suspend didn't change conditions */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			goto out_sleep;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		/* the AP path falls through to out_sleep below */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
}
1717
/*
 * mac80211 .suspend callback: configure every vif for WoWLAN, then
 * quiesce the driver (flush the threaded irq, tx work and elp work, and
 * cancel the tx watchdog).  Returns -EBUSY to postpone the suspend when
 * a recovery is pending.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			    struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	/*
	 * Cancel the watchdog even if above tx_flush failed. We will detect
	 * it on resume anyway.
	 */
	cancel_delayed_work(&wl->tx_watchdog_work);

	return 0;
}
1774
/*
 * mac80211 .resume callback: re-enable irq_work enqueuing, replay any
 * irq work that was postponed while suspended, re-queue a forgotten
 * recovery if one is pending, and restore each vif's pre-suspend
 * configuration.  Always returns 0.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1840 #endif
1841
1842 static int wl1271_op_start(struct ieee80211_hw *hw)
1843 {
1844 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1845
1846 /*
1847 * We have to delay the booting of the hardware because
1848 * we need to know the local MAC address before downloading and
1849 * initializing the firmware. The MAC address cannot be changed
1850 * after boot, and without the proper MAC address, the firmware
1851 * will not function properly.
1852 *
1853 * The MAC address is first known when the corresponding interface
1854 * is added. That is where we will initialize the hardware.
1855 */
1856
1857 return 0;
1858 }
1859
/*
 * Stop the device and reset all driver state.  Called with wl->mutex
 * held; the mutex is temporarily dropped to drain work items that take
 * it themselves.  Balances interrupt-disable depth when a recovery was
 * in flight, powers the chip off, and frees the fw-status buffers.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* already off; only balance a recovery's interrupt disable */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	/* drain all pending work with the mutex released */
	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all runtime state to post-probe defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
1968
/* mac80211 .stop callback: take the mutex and delegate to the locked
 * implementation. */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
1981
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report the failure to mac80211 and tell the firmware to abort the
 * switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* report failure (success == false) to mac80211 */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2018
/*
 * Delayed work that reports a connection loss to mac80211 for a STA
 * vif, unless the vif has disassociated (or the device stopped) in the
 * meantime.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2046
/*
 * Delayed work that cancels the pending-auth ROC if no further auth
 * reply arrived within the timeout.  Rearmed timestamps are honored:
 * if a newer auth reply came in while we waited on the mutex, the work
 * bails out and lets the rescheduled instance handle it.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2087
2088 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2089 {
2090 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2091 WL12XX_MAX_RATE_POLICIES);
2092 if (policy >= WL12XX_MAX_RATE_POLICIES)
2093 return -EBUSY;
2094
2095 __set_bit(policy, wl->rate_policies_map);
2096 *idx = policy;
2097 return 0;
2098 }
2099
2100 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2101 {
2102 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2103 return;
2104
2105 __clear_bit(*idx, wl->rate_policies_map);
2106 *idx = WL12XX_MAX_RATE_POLICIES;
2107 }
2108
2109 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2110 {
2111 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2112 WLCORE_MAX_KLV_TEMPLATES);
2113 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2114 return -EBUSY;
2115
2116 __set_bit(policy, wl->klv_templates_map);
2117 *idx = policy;
2118 return 0;
2119 }
2120
2121 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2122 {
2123 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2124 return;
2125
2126 __clear_bit(*idx, wl->klv_templates_map);
2127 *idx = WLCORE_MAX_KLV_TEMPLATES;
2128 }
2129
2130 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2131 {
2132 switch (wlvif->bss_type) {
2133 case BSS_TYPE_AP_BSS:
2134 if (wlvif->p2p)
2135 return WL1271_ROLE_P2P_GO;
2136 else
2137 return WL1271_ROLE_AP;
2138
2139 case BSS_TYPE_STA_BSS:
2140 if (wlvif->p2p)
2141 return WL1271_ROLE_P2P_CL;
2142 else
2143 return WL1271_ROLE_STA;
2144
2145 case BSS_TYPE_IBSS:
2146 return WL1271_ROLE_IBSS;
2147
2148 default:
2149 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2150 }
2151 return WL12XX_INVALID_ROLE_TYPE;
2152 }
2153
/*
 * Initialize the per-vif driver data for a newly added interface:
 * derive the bss_type from the mac80211 interface type, allocate rate
 * policies and (for STA/IBSS) a keep-alive template, copy the global
 * mac80211 settings into per-vif fields, and set up the vif's works
 * and timers.  Returns 0 or -EOPNOTSUPP for unsupported types.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no role/link exists yet; marked invalid until cmds allocate them */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2247
2248 static int wl12xx_init_fw(struct wl1271 *wl)
2249 {
2250 int retries = WL1271_BOOT_RETRIES;
2251 bool booted = false;
2252 struct wiphy *wiphy = wl->hw->wiphy;
2253 int ret;
2254
2255 while (retries) {
2256 retries--;
2257 ret = wl12xx_chip_wakeup(wl, false);
2258 if (ret < 0)
2259 goto power_off;
2260
2261 ret = wl->ops->boot(wl);
2262 if (ret < 0)
2263 goto power_off;
2264
2265 ret = wl1271_hw_init(wl);
2266 if (ret < 0)
2267 goto irq_disable;
2268
2269 booted = true;
2270 break;
2271
2272 irq_disable:
2273 mutex_unlock(&wl->mutex);
2274 /* Unlocking the mutex in the middle of handling is
2275 inherently unsafe. In this case we deem it safe to do,
2276 because we need to let any possibly pending IRQ out of
2277 the system (and while we are WLCORE_STATE_OFF the IRQ
2278 work function will not do anything.) Also, any other
2279 possible concurrent operations will fail due to the
2280 current state, hence the wl1271 struct should be safe. */
2281 wlcore_disable_interrupts(wl);
2282 wl1271_flush_deferred_work(wl);
2283 cancel_work_sync(&wl->netstack_work);
2284 mutex_lock(&wl->mutex);
2285 power_off:
2286 wl1271_power_off(wl);
2287 }
2288
2289 if (!booted) {
2290 wl1271_error("firmware boot failed despite %d retries",
2291 WL1271_BOOT_RETRIES);
2292 goto out;
2293 }
2294
2295 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2296
2297 /* update hw/fw version info in wiphy struct */
2298 wiphy->hw_version = wl->chip.id;
2299 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2300 sizeof(wiphy->fw_version));
2301
2302 /*
2303 * Now we know if 11a is supported (info from the NVS), so disable
2304 * 11a channels if not supported
2305 */
2306 if (!wl->enable_11a)
2307 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2308
2309 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2310 wl->enable_11a ? "" : "not ");
2311
2312 wl->state = WLCORE_STATE_ON;
2313 out:
2314 return ret;
2315 }
2316
2317 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2318 {
2319 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2320 }
2321
2322 /*
2323 * Check whether a fw switch (i.e. moving from one loaded
2324 * fw to another) is needed. This function is also responsible
2325 * for updating wl->last_vif_count, so it must be called before
2326 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2327 * will be used).
2328 */
2329 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2330 struct vif_counter_data vif_counter_data,
2331 bool add)
2332 {
2333 enum wl12xx_fw_type current_fw = wl->fw_type;
2334 u8 vif_count = vif_counter_data.counter;
2335
2336 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2337 return false;
2338
2339 /* increase the vif count if this is a new vif */
2340 if (add && !vif_counter_data.cur_vif_running)
2341 vif_count++;
2342
2343 wl->last_vif_count = vif_count;
2344
2345 /* no need for fw change if the device is OFF */
2346 if (wl->state == WLCORE_STATE_OFF)
2347 return false;
2348
2349 /* no need for fw change if a single fw is used */
2350 if (!wl->mr_fw_name)
2351 return false;
2352
2353 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2354 return true;
2355 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2356 return true;
2357
2358 return false;
2359 }
2360
2361 /*
2362 * Enter "forced psm". Make sure the sta is in psm against the ap,
2363 * to make the fw switch a bit more disconnection-persistent.
2364 */
2365 static void wl12xx_force_active_psm(struct wl1271 *wl)
2366 {
2367 struct wl12xx_vif *wlvif;
2368
2369 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2370 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2371 }
2372 }
2373
/* accumulator for the active-interface iteration that finds a free
 * hw-queue base for a new vif (see wlcore_allocate_hw_queue_base()) */
struct wlcore_hw_queue_iter_data {
	/* bit N set => hw queue base N is taken by an active interface */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2381
2382 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2383 struct ieee80211_vif *vif)
2384 {
2385 struct wlcore_hw_queue_iter_data *iter_data = data;
2386
2387 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2388 return;
2389
2390 if (iter_data->cur_running || vif == iter_data->vif) {
2391 iter_data->cur_running = true;
2392 return;
2393 }
2394
2395 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2396 }
2397
2398 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2399 struct wl12xx_vif *wlvif)
2400 {
2401 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2402 struct wlcore_hw_queue_iter_data iter_data = {};
2403 int i, q_base;
2404
2405 iter_data.vif = vif;
2406
2407 /* mark all bits taken by active interfaces */
2408 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2409 IEEE80211_IFACE_ITER_RESUME_ALL,
2410 wlcore_hw_queue_iter, &iter_data);
2411
2412 /* the current vif is already running in mac80211 (resume/recovery) */
2413 if (iter_data.cur_running) {
2414 wlvif->hw_queue_base = vif->hw_queue[0];
2415 wl1271_debug(DEBUG_MAC80211,
2416 "using pre-allocated hw queue base %d",
2417 wlvif->hw_queue_base);
2418
2419 /* interface type might have changed type */
2420 goto adjust_cab_queue;
2421 }
2422
2423 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2424 WLCORE_NUM_MAC_ADDRESSES);
2425 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2426 return -EBUSY;
2427
2428 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2429 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2430 wlvif->hw_queue_base);
2431
2432 for (i = 0; i < NUM_TX_QUEUES; i++) {
2433 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2434 /* register hw queues in mac80211 */
2435 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2436 }
2437
2438 adjust_cab_queue:
2439 /* the last places are reserved for cab queues per interface */
2440 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2441 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2442 wlvif->hw_queue_base / NUM_TX_QUEUES;
2443 else
2444 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2445
2446 return 0;
2447 }
2448
/*
 * mac80211 add_interface callback: initialize per-vif data, allocate the
 * vif's hw queue base and fw role, boot the fw if it is not running yet,
 * and start tracking the vif on wl->wlvif_list. May instead trigger a
 * synchronous fw recovery when a single-role/multi-role fw switch is
 * needed for the new vif count.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	/* count existing vifs before taking the mutex (iterates mac80211) */
	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/*
	 * a different (single-role vs multi-role) fw is needed for the new
	 * vif count; run the recovery work synchronously with wl->mutex
	 * dropped to restart with the right fw.
	 */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	/* vif is fully set up - start tracking it */
	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2550
/*
 * Tear a vif down: stop any scan/sched-scan/ROC tied to it, disable its
 * fw roles (skipped while a recovery is in progress), free its rate
 * policies and hlids, drop it from wl->wlvif_list and cancel its per-vif
 * works. Called with wl->mutex held; the mutex is dropped temporarily at
 * the end while the works are cancelled.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		/* report the aborted scan to mac80211 */
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	/* drop the mutex while cancelling works that may themselves take it */
	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2687
/*
 * mac80211 remove_interface callback: tear the vif down (only if it is
 * still on the driver's vif list) and queue a fw-switch recovery if the
 * remaining vif count calls for a different (single/multi role) fw.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2723
2724 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2725 struct ieee80211_vif *vif,
2726 enum nl80211_iftype new_type, bool p2p)
2727 {
2728 struct wl1271 *wl = hw->priv;
2729 int ret;
2730
2731 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2732 wl1271_op_remove_interface(hw, vif);
2733
2734 vif->type = new_type;
2735 vif->p2p = p2p;
2736 ret = wl1271_op_add_interface(hw, vif);
2737
2738 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2739 return ret;
2740 }
2741
/*
 * Issue the JOIN (role start) for a STA or IBSS vif. Clears the cached
 * encryption type first, since the JOIN also clears the keys in the
 * chipset (see comment below). Returns the role-start command result.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
2782
2783 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2784 int offset)
2785 {
2786 u8 ssid_len;
2787 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2788 skb->len - offset);
2789
2790 if (!ptr) {
2791 wl1271_error("No SSID in IEs!");
2792 return -ENOENT;
2793 }
2794
2795 ssid_len = ptr[1];
2796 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2797 wl1271_error("SSID is too long!");
2798 return -EINVAL;
2799 }
2800
2801 wlvif->ssid_len = ssid_len;
2802 memcpy(wlvif->ssid, ptr+2, ssid_len);
2803 return 0;
2804 }
2805
2806 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2807 {
2808 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2809 struct sk_buff *skb;
2810 int ieoffset;
2811
2812 /* we currently only support setting the ssid from the ap probe req */
2813 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2814 return -EINVAL;
2815
2816 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2817 if (!skb)
2818 return -EINVAL;
2819
2820 ieoffset = offsetof(struct ieee80211_mgmt,
2821 u.probe_req.variable);
2822 wl1271_ssid_set(wlvif, skb, ieoffset);
2823 dev_kfree_skb(skb);
2824
2825 return 0;
2826 }
2827
2828 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2829 struct ieee80211_bss_conf *bss_conf,
2830 u32 sta_rate_set)
2831 {
2832 int ieoffset;
2833 int ret;
2834
2835 wlvif->aid = bss_conf->aid;
2836 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2837 wlvif->beacon_int = bss_conf->beacon_int;
2838 wlvif->wmm_enabled = bss_conf->qos;
2839
2840 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2841
2842 /*
2843 * with wl1271, we don't need to update the
2844 * beacon_int and dtim_period, because the firmware
2845 * updates it by itself when the first beacon is
2846 * received after a join.
2847 */
2848 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2849 if (ret < 0)
2850 return ret;
2851
2852 /*
2853 * Get a template for hardware connection maintenance
2854 */
2855 dev_kfree_skb(wlvif->probereq);
2856 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2857 wlvif,
2858 NULL);
2859 ieoffset = offsetof(struct ieee80211_mgmt,
2860 u.probe_req.variable);
2861 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2862
2863 /* enable the connection monitoring feature */
2864 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2865 if (ret < 0)
2866 return ret;
2867
2868 /*
2869 * The join command disable the keep-alive mode, shut down its process,
2870 * and also clear the template config, so we need to reset it all after
2871 * the join. The acx_aid starts the keep-alive process, and the order
2872 * of the commands below is relevant.
2873 */
2874 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2875 if (ret < 0)
2876 return ret;
2877
2878 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2879 if (ret < 0)
2880 return ret;
2881
2882 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2883 if (ret < 0)
2884 return ret;
2885
2886 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2887 wlvif->sta.klv_template_id,
2888 ACX_KEEP_ALIVE_TPL_VALID);
2889 if (ret < 0)
2890 return ret;
2891
2892 /*
2893 * The default fw psm configuration is AUTO, while mac80211 default
2894 * setting is off (ACTIVE), so sync the fw with the correct value.
2895 */
2896 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2897 if (ret < 0)
2898 return ret;
2899
2900 if (sta_rate_set) {
2901 wlvif->rate_set =
2902 wl1271_tx_enabled_rates_get(wl,
2903 sta_rate_set,
2904 wlvif->band);
2905 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2906 if (ret < 0)
2907 return ret;
2908 }
2909
2910 return ret;
2911 }
2912
/*
 * Undo association state: drop the probe-request template, disable
 * connection monitoring and keep-alive (sta only), abort an in-progress
 * channel switch and invalidate the keep-alive template.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	/*
	 * NOTE(review): unlike the sta check above, this one bails out when
	 * IBSS_JOINED *was* set (no '!'), so a joined ibss skips the cleanup
	 * below while a non-joined one runs it - confirm this asymmetry is
	 * intentional. Also 'false' (0) is returned from an int-returning
	 * function, which callers see as success.
	 */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* abort the fw-side switch and report failure to mac80211 */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2962
2963 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2964 {
2965 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2966 wlvif->rate_set = wlvif->basic_rate_set;
2967 }
2968
2969 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2970 bool idle)
2971 {
2972 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2973
2974 if (idle == cur_idle)
2975 return;
2976
2977 if (idle) {
2978 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2979 } else {
2980 /* The current firmware only supports sched_scan in idle */
2981 if (wl->sched_vif == wlvif)
2982 wl->ops->sched_scan_stop(wl, wlvif);
2983
2984 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2985 }
2986 }
2987
2988 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2989 struct ieee80211_conf *conf, u32 changed)
2990 {
2991 int ret;
2992
2993 if (conf->power_level != wlvif->power_level) {
2994 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2995 if (ret < 0)
2996 return ret;
2997
2998 wlvif->power_level = conf->power_level;
2999 }
3000
3001 return 0;
3002 }
3003
/*
 * mac80211 config callback: cache globally-configured values (currently
 * only tx power) in wl and, when the fw is running, apply them to every
 * active vif.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* cache the new power level even when the fw is not running yet */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3045
/* multicast filter snapshot built in prepare_multicast() and consumed in
 * configure_filter() (passed between them packed into a u64) */
struct wl1271_filter_params {
	/* false when the mc list exceeded ACX_MC_ADDRESS_GROUP_MAX entries */
	bool enabled;
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3051
3052 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3053 struct netdev_hw_addr_list *mc_list)
3054 {
3055 struct wl1271_filter_params *fp;
3056 struct netdev_hw_addr *ha;
3057
3058 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3059 if (!fp) {
3060 wl1271_error("Out of memory setting filters.");
3061 return 0;
3062 }
3063
3064 /* update multicast filtering parameters */
3065 fp->mc_list_length = 0;
3066 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3067 fp->enabled = false;
3068 } else {
3069 fp->enabled = true;
3070 netdev_hw_addr_list_for_each(ha, mc_list) {
3071 memcpy(fp->mc_list[fp->mc_list_length],
3072 ha->addr, ETH_ALEN);
3073 fp->mc_list_length++;
3074 }
3075 }
3076
3077 return (u64)(unsigned long)fp;
3078 }
3079
/* RX filter flags honoured by the driver; everything else is masked out
 * of 'changed'/'total' in wl1271_op_configure_filter() */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3086
3087 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3088 unsigned int changed,
3089 unsigned int *total, u64 multicast)
3090 {
3091 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3092 struct wl1271 *wl = hw->priv;
3093 struct wl12xx_vif *wlvif;
3094
3095 int ret;
3096
3097 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3098 " total %x", changed, *total);
3099
3100 mutex_lock(&wl->mutex);
3101
3102 *total &= WL1271_SUPPORTED_FILTERS;
3103 changed &= WL1271_SUPPORTED_FILTERS;
3104
3105 if (unlikely(wl->state != WLCORE_STATE_ON))
3106 goto out;
3107
3108 ret = wl1271_ps_elp_wakeup(wl);
3109 if (ret < 0)
3110 goto out;
3111
3112 wl12xx_for_each_wlvif(wl, wlvif) {
3113 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3114 if (*total & FIF_ALLMULTI)
3115 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3116 false,
3117 NULL, 0);
3118 else if (fp)
3119 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3120 fp->enabled,
3121 fp->mc_list,
3122 fp->mc_list_length);
3123 if (ret < 0)
3124 goto out_sleep;
3125 }
3126 }
3127
3128 /*
3129 * the fw doesn't provide an api to configure the filters. instead,
3130 * the filters configuration is based on the active roles / ROC
3131 * state.
3132 */
3133
3134 out_sleep:
3135 wl1271_ps_elp_sleep(wl);
3136
3137 out:
3138 mutex_unlock(&wl->mutex);
3139 kfree(fp);
3140 }
3141
3142 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3143 u8 id, u8 key_type, u8 key_size,
3144 const u8 *key, u8 hlid, u32 tx_seq_32,
3145 u16 tx_seq_16)
3146 {
3147 struct wl1271_ap_key *ap_key;
3148 int i;
3149
3150 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3151
3152 if (key_size > MAX_KEY_SIZE)
3153 return -EINVAL;
3154
3155 /*
3156 * Find next free entry in ap_keys. Also check we are not replacing
3157 * an existing key.
3158 */
3159 for (i = 0; i < MAX_NUM_KEYS; i++) {
3160 if (wlvif->ap.recorded_keys[i] == NULL)
3161 break;
3162
3163 if (wlvif->ap.recorded_keys[i]->id == id) {
3164 wl1271_warning("trying to record key replacement");
3165 return -EINVAL;
3166 }
3167 }
3168
3169 if (i == MAX_NUM_KEYS)
3170 return -EBUSY;
3171
3172 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3173 if (!ap_key)
3174 return -ENOMEM;
3175
3176 ap_key->id = id;
3177 ap_key->key_type = key_type;
3178 ap_key->key_size = key_size;
3179 memcpy(ap_key->key, key, key_size);
3180 ap_key->hlid = hlid;
3181 ap_key->tx_seq_32 = tx_seq_32;
3182 ap_key->tx_seq_16 = tx_seq_16;
3183
3184 wlvif->ap.recorded_keys[i] = ap_key;
3185 return 0;
3186 }
3187
3188 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3189 {
3190 int i;
3191
3192 for (i = 0; i < MAX_NUM_KEYS; i++) {
3193 kfree(wlvif->ap.recorded_keys[i]);
3194 wlvif->ap.recorded_keys[i] = NULL;
3195 }
3196 }
3197
/*
 * Program the keys recorded before AP start into the fw, set the default
 * WEP key if any WEP key was among them, and free the recorded keys
 * (freed on the error path as well).
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a specific link go to the bcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3237
/*
 * Add or remove a key in the fw for an AP or STA vif. For an AP role that
 * has not started yet, add operations are only recorded (programmed later
 * by wl1271_ap_init_hwenc()) and removals are silently accepted.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* per-sta key goes to the sta's link, otherwise broadcast */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3314
/*
 * mac80211 set_key callback: for ciphers that change the fw's spare-block
 * accounting (GEM/TKIP), stop and flush the tx queues around the key
 * change so queued packets stay in sync with the fw, then dispatch to the
 * chip-specific set_key op.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	/* restart the queues even on failure so tx does not stay stalled */
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3358
/*
 * Translate a mac80211 key operation into fw key commands: derive the
 * target hlid, seed the tx sequence counters from the link's freed-packet
 * counter, map the cipher to a fw key type and add or remove the key.
 * Shared by the chip-specific set_key ops (hence the export).
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/* AP: per-sta link or the broadcast link; STA: the sta link */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	/* continue the link's tx sequence where the fw left off */
	if (hlid != WL12XX_INVALID_LINK_ID) {
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				 key_conf->keyidx, key_type,
				 key_conf->keylen, key_conf->key,
				 tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
3465
3466 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3467 struct ieee80211_vif *vif,
3468 int key_idx)
3469 {
3470 struct wl1271 *wl = hw->priv;
3471 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3472 int ret;
3473
3474 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3475 key_idx);
3476
3477 mutex_lock(&wl->mutex);
3478
3479 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3480 ret = -EAGAIN;
3481 goto out_unlock;
3482 }
3483
3484 ret = wl1271_ps_elp_wakeup(wl);
3485 if (ret < 0)
3486 goto out_unlock;
3487
3488 wlvif->default_key = key_idx;
3489
3490 /* the default WEP key needs to be configured at least once */
3491 if (wlvif->encryption_type == KEY_WEP) {
3492 ret = wl12xx_cmd_set_default_wep_key(wl,
3493 key_idx,
3494 wlvif->sta.hlid);
3495 if (ret < 0)
3496 goto out_sleep;
3497 }
3498
3499 out_sleep:
3500 wl1271_ps_elp_sleep(wl);
3501
3502 out_unlock:
3503 mutex_unlock(&wl->mutex);
3504 }
3505
/*
 * Push the current regulatory-domain configuration to the firmware.
 *
 * Only relevant for hardware flagged with WLCORE_QUIRK_REGDOMAIN_CONF;
 * all other chips return immediately.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		/*
		 * NOTE(review): this error path skips wl1271_ps_elp_sleep()
		 * — presumably intentional because the queued recovery
		 * re-initializes chip state; confirm before "fixing".
		 */
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3532
3533 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3534 struct ieee80211_vif *vif,
3535 struct cfg80211_scan_request *req)
3536 {
3537 struct wl1271 *wl = hw->priv;
3538 int ret;
3539 u8 *ssid = NULL;
3540 size_t len = 0;
3541
3542 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3543
3544 if (req->n_ssids) {
3545 ssid = req->ssids[0].ssid;
3546 len = req->ssids[0].ssid_len;
3547 }
3548
3549 mutex_lock(&wl->mutex);
3550
3551 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3552 /*
3553 * We cannot return -EBUSY here because cfg80211 will expect
3554 * a call to ieee80211_scan_completed if we do - in this case
3555 * there won't be any call.
3556 */
3557 ret = -EAGAIN;
3558 goto out;
3559 }
3560
3561 ret = wl1271_ps_elp_wakeup(wl);
3562 if (ret < 0)
3563 goto out;
3564
3565 /* fail if there is any role in ROC */
3566 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3567 /* don't allow scanning right now */
3568 ret = -EBUSY;
3569 goto out_sleep;
3570 }
3571
3572 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3573 out_sleep:
3574 wl1271_ps_elp_sleep(wl);
3575 out:
3576 mutex_unlock(&wl->mutex);
3577
3578 return ret;
3579 }
3580
/*
 * mac80211 callback: abort a running one-shot hardware scan.
 *
 * Stops the firmware scan (unless it already finished), rearms the tx
 * watchdog, resets the driver's scan state and reports the scan to
 * mac80211 as aborted.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* no scan in flight — nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* only ask the fw to stop if it hasn't already completed */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	/* 'true' = scan was aborted */
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	/*
	 * NOTE(review): done after dropping wl->mutex — presumably
	 * scan_complete_work itself takes the mutex; confirm.
	 */
	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3627
3628 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3629 struct ieee80211_vif *vif,
3630 struct cfg80211_sched_scan_request *req,
3631 struct ieee80211_sched_scan_ies *ies)
3632 {
3633 struct wl1271 *wl = hw->priv;
3634 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3635 int ret;
3636
3637 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3638
3639 mutex_lock(&wl->mutex);
3640
3641 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3642 ret = -EAGAIN;
3643 goto out;
3644 }
3645
3646 ret = wl1271_ps_elp_wakeup(wl);
3647 if (ret < 0)
3648 goto out;
3649
3650 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3651 if (ret < 0)
3652 goto out_sleep;
3653
3654 wl->sched_vif = wlvif;
3655
3656 out_sleep:
3657 wl1271_ps_elp_sleep(wl);
3658 out:
3659 mutex_unlock(&wl->mutex);
3660 return ret;
3661 }
3662
3663 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3664 struct ieee80211_vif *vif)
3665 {
3666 struct wl1271 *wl = hw->priv;
3667 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3668 int ret;
3669
3670 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3671
3672 mutex_lock(&wl->mutex);
3673
3674 if (unlikely(wl->state != WLCORE_STATE_ON))
3675 goto out;
3676
3677 ret = wl1271_ps_elp_wakeup(wl);
3678 if (ret < 0)
3679 goto out;
3680
3681 wl->ops->sched_scan_stop(wl, wlvif);
3682
3683 wl1271_ps_elp_sleep(wl);
3684 out:
3685 mutex_unlock(&wl->mutex);
3686 }
3687
3688 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3689 {
3690 struct wl1271 *wl = hw->priv;
3691 int ret = 0;
3692
3693 mutex_lock(&wl->mutex);
3694
3695 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3696 ret = -EAGAIN;
3697 goto out;
3698 }
3699
3700 ret = wl1271_ps_elp_wakeup(wl);
3701 if (ret < 0)
3702 goto out;
3703
3704 ret = wl1271_acx_frag_threshold(wl, value);
3705 if (ret < 0)
3706 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3707
3708 wl1271_ps_elp_sleep(wl);
3709
3710 out:
3711 mutex_unlock(&wl->mutex);
3712
3713 return ret;
3714 }
3715
3716 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3717 {
3718 struct wl1271 *wl = hw->priv;
3719 struct wl12xx_vif *wlvif;
3720 int ret = 0;
3721
3722 mutex_lock(&wl->mutex);
3723
3724 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3725 ret = -EAGAIN;
3726 goto out;
3727 }
3728
3729 ret = wl1271_ps_elp_wakeup(wl);
3730 if (ret < 0)
3731 goto out;
3732
3733 wl12xx_for_each_wlvif(wl, wlvif) {
3734 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3735 if (ret < 0)
3736 wl1271_warning("set rts threshold failed: %d", ret);
3737 }
3738 wl1271_ps_elp_sleep(wl);
3739
3740 out:
3741 mutex_unlock(&wl->mutex);
3742
3743 return ret;
3744 }
3745
3746 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3747 {
3748 int len;
3749 const u8 *next, *end = skb->data + skb->len;
3750 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3751 skb->len - ieoffset);
3752 if (!ie)
3753 return;
3754 len = ie[1] + 2;
3755 next = ie + len;
3756 memmove(ie, next, end - next);
3757 skb_trim(skb, skb->len - len);
3758 }
3759
3760 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3761 unsigned int oui, u8 oui_type,
3762 int ieoffset)
3763 {
3764 int len;
3765 const u8 *next, *end = skb->data + skb->len;
3766 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3767 skb->data + ieoffset,
3768 skb->len - ieoffset);
3769 if (!ie)
3770 return;
3771 len = ie[1] + 2;
3772 next = ie + len;
3773 memmove(ie, next, end - next);
3774 skb_trim(skb, skb->len - len);
3775 }
3776
3777 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3778 struct ieee80211_vif *vif)
3779 {
3780 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3781 struct sk_buff *skb;
3782 int ret;
3783
3784 skb = ieee80211_proberesp_get(wl->hw, vif);
3785 if (!skb)
3786 return -EOPNOTSUPP;
3787
3788 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3789 CMD_TEMPL_AP_PROBE_RESPONSE,
3790 skb->data,
3791 skb->len, 0,
3792 rates);
3793 dev_kfree_skb(skb);
3794
3795 if (ret < 0)
3796 goto out;
3797
3798 wl1271_debug(DEBUG_AP, "probe response updated");
3799 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3800
3801 out:
3802 return ret;
3803 }
3804
/*
 * Build an AP probe-response template from raw beacon/probe-resp bytes,
 * splicing in the SSID from bss_conf when the vif has no SSID of its
 * own (hidden-SSID case), and upload the result to the firmware.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
					       struct ieee80211_vif *vif,
					       u8 *probe_rsp_data,
					       size_t probe_rsp_len,
					       u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	/* worst case: the whole input plus the spliced-in SSID must fit */
	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the original SSID IE (2-byte header + payload) */
	ptr += (ptr[1] + 2);

	/* copy everything up to (but excluding) the original SSID IE */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the IEs that followed the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3862
3863 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3864 struct ieee80211_vif *vif,
3865 struct ieee80211_bss_conf *bss_conf,
3866 u32 changed)
3867 {
3868 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3869 int ret = 0;
3870
3871 if (changed & BSS_CHANGED_ERP_SLOT) {
3872 if (bss_conf->use_short_slot)
3873 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3874 else
3875 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3876 if (ret < 0) {
3877 wl1271_warning("Set slot time failed %d", ret);
3878 goto out;
3879 }
3880 }
3881
3882 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3883 if (bss_conf->use_short_preamble)
3884 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3885 else
3886 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3887 }
3888
3889 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3890 if (bss_conf->use_cts_prot)
3891 ret = wl1271_acx_cts_protect(wl, wlvif,
3892 CTSPROTECT_ENABLE);
3893 else
3894 ret = wl1271_acx_cts_protect(wl, wlvif,
3895 CTSPROTECT_DISABLE);
3896 if (ret < 0) {
3897 wl1271_warning("Set ctsprotect failed %d", ret);
3898 goto out;
3899 }
3900 }
3901
3902 out:
3903 return ret;
3904 }
3905
3906 static int wlcore_set_beacon_template(struct wl1271 *wl,
3907 struct ieee80211_vif *vif,
3908 bool is_ap)
3909 {
3910 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3911 struct ieee80211_hdr *hdr;
3912 u32 min_rate;
3913 int ret;
3914 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3915 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3916 u16 tmpl_id;
3917
3918 if (!beacon) {
3919 ret = -EINVAL;
3920 goto out;
3921 }
3922
3923 wl1271_debug(DEBUG_MASTER, "beacon updated");
3924
3925 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3926 if (ret < 0) {
3927 dev_kfree_skb(beacon);
3928 goto out;
3929 }
3930 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3931 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3932 CMD_TEMPL_BEACON;
3933 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3934 beacon->data,
3935 beacon->len, 0,
3936 min_rate);
3937 if (ret < 0) {
3938 dev_kfree_skb(beacon);
3939 goto out;
3940 }
3941
3942 wlvif->wmm_enabled =
3943 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3944 WLAN_OUI_TYPE_MICROSOFT_WMM,
3945 beacon->data + ieoffset,
3946 beacon->len - ieoffset);
3947
3948 /*
3949 * In case we already have a probe-resp beacon set explicitly
3950 * by usermode, don't use the beacon data.
3951 */
3952 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3953 goto end_bcn;
3954
3955 /* remove TIM ie from probe response */
3956 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3957
3958 /*
3959 * remove p2p ie from probe response.
3960 * the fw reponds to probe requests that don't include
3961 * the p2p ie. probe requests with p2p ie will be passed,
3962 * and will be responded by the supplicant (the spec
3963 * forbids including the p2p ie when responding to probe
3964 * requests that didn't include it).
3965 */
3966 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3967 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3968
3969 hdr = (struct ieee80211_hdr *) beacon->data;
3970 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3971 IEEE80211_STYPE_PROBE_RESP);
3972 if (is_ap)
3973 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3974 beacon->data,
3975 beacon->len,
3976 min_rate);
3977 else
3978 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3979 CMD_TEMPL_PROBE_RESPONSE,
3980 beacon->data,
3981 beacon->len, 0,
3982 min_rate);
3983 end_bcn:
3984 dev_kfree_skb(beacon);
3985 if (ret < 0)
3986 goto out;
3987
3988 out:
3989 return ret;
3990 }
3991
3992 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3993 struct ieee80211_vif *vif,
3994 struct ieee80211_bss_conf *bss_conf,
3995 u32 changed)
3996 {
3997 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3998 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3999 int ret = 0;
4000
4001 if (changed & BSS_CHANGED_BEACON_INT) {
4002 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4003 bss_conf->beacon_int);
4004
4005 wlvif->beacon_int = bss_conf->beacon_int;
4006 }
4007
4008 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4009 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4010
4011 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4012 }
4013
4014 if (changed & BSS_CHANGED_BEACON) {
4015 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4016 if (ret < 0)
4017 goto out;
4018 }
4019
4020 out:
4021 if (ret != 0)
4022 wl1271_error("beacon info change failed: %d", ret);
4023 return ret;
4024 }
4025
/*
 * AP mode changes: apply bss_info updates for an AP vif — basic
 * rates/templates, beacon enable/disable (role start/stop), ERP
 * settings and HT operation mode. Errors are only logged; the
 * function returns void.
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	/* new basic rates: rebuild rate policies and all templates */
	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* start/stop the AP role when beaconing is toggled */
	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4119
4120 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4121 struct ieee80211_bss_conf *bss_conf,
4122 u32 sta_rate_set)
4123 {
4124 u32 rates;
4125 int ret;
4126
4127 wl1271_debug(DEBUG_MAC80211,
4128 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4129 bss_conf->bssid, bss_conf->aid,
4130 bss_conf->beacon_int,
4131 bss_conf->basic_rates, sta_rate_set);
4132
4133 wlvif->beacon_int = bss_conf->beacon_int;
4134 rates = bss_conf->basic_rates;
4135 wlvif->basic_rate_set =
4136 wl1271_tx_enabled_rates_get(wl, rates,
4137 wlvif->band);
4138 wlvif->basic_rate =
4139 wl1271_tx_min_rate_get(wl,
4140 wlvif->basic_rate_set);
4141
4142 if (sta_rate_set)
4143 wlvif->rate_set =
4144 wl1271_tx_enabled_rates_get(wl,
4145 sta_rate_set,
4146 wlvif->band);
4147
4148 /* we only support sched_scan while not connected */
4149 if (wl->sched_vif == wlvif)
4150 wl->ops->sched_scan_stop(wl, wlvif);
4151
4152 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4153 if (ret < 0)
4154 return ret;
4155
4156 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4157 if (ret < 0)
4158 return ret;
4159
4160 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4161 if (ret < 0)
4162 return ret;
4163
4164 wlcore_set_ssid(wl, wlvif);
4165
4166 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4167
4168 return 0;
4169 }
4170
4171 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4172 {
4173 int ret;
4174
4175 /* revert back to minimum rates for the current band */
4176 wl1271_set_band_rate(wl, wlvif);
4177 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4178
4179 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4180 if (ret < 0)
4181 return ret;
4182
4183 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4184 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4185 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4186 if (ret < 0)
4187 return ret;
4188 }
4189
4190 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4191 return 0;
4192 }
/*
 * STA/IBSS mode changes: apply bss_info updates for a station or
 * ad-hoc vif — IBSS join/leave, CQM thresholds, BSSID set/clear,
 * ERP settings, association, power save, HT peer capabilities and
 * ARP filtering. Ordering matters: the join happens before assoc,
 * HT and ARP configuration. Errors are only logged.
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	/* IBSS vifs also carry beacon info; handle that first */
	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	/* connection-quality monitoring: RSSI trigger threshold */
	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	/* snapshot the peer's rates and HT caps under RCU for use below */
	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* re-apply a previously granted authorization */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	/* power-save transitions requested by mac80211 */
	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4434
/*
 * mac80211 callback: entry point for bss_info changes. Handles the
 * parts common to all vif types (cancelling pending disconnect work,
 * flushing tx before beacons stop, tx power updates) and then
 * dispatches to the AP- or STA/IBSS-specific handler.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* flush queued frames before the AP stops beaconing;
	 * done before taking wl->mutex (tx_flush manages its own locking) */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* apply tx power changes regardless of vif type */
	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4491
4492 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4493 struct ieee80211_chanctx_conf *ctx)
4494 {
4495 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4496 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4497 cfg80211_get_chandef_type(&ctx->def));
4498 return 0;
4499 }
4500
4501 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4502 struct ieee80211_chanctx_conf *ctx)
4503 {
4504 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4505 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4506 cfg80211_get_chandef_type(&ctx->def));
4507 }
4508
4509 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4510 struct ieee80211_chanctx_conf *ctx,
4511 u32 changed)
4512 {
4513 wl1271_debug(DEBUG_MAC80211,
4514 "mac80211 change chanctx %d (type %d) changed 0x%x",
4515 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4516 cfg80211_get_chandef_type(&ctx->def), changed);
4517 }
4518
4519 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4520 struct ieee80211_vif *vif,
4521 struct ieee80211_chanctx_conf *ctx)
4522 {
4523 struct wl1271 *wl = hw->priv;
4524 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4525 int channel = ieee80211_frequency_to_channel(
4526 ctx->def.chan->center_freq);
4527
4528 wl1271_debug(DEBUG_MAC80211,
4529 "mac80211 assign chanctx (role %d) %d (type %d)",
4530 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4531
4532 mutex_lock(&wl->mutex);
4533
4534 wlvif->band = ctx->def.chan->band;
4535 wlvif->channel = channel;
4536 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4537
4538 /* update default rates according to the band */
4539 wl1271_set_band_rate(wl, wlvif);
4540
4541 mutex_unlock(&wl->mutex);
4542
4543 return 0;
4544 }
4545
4546 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4547 struct ieee80211_vif *vif,
4548 struct ieee80211_chanctx_conf *ctx)
4549 {
4550 struct wl1271 *wl = hw->priv;
4551 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4552
4553 wl1271_debug(DEBUG_MAC80211,
4554 "mac80211 unassign chanctx (role %d) %d (type %d)",
4555 wlvif->role_id,
4556 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4557 cfg80211_get_chandef_type(&ctx->def));
4558
4559 wl1271_tx_flush(wl);
4560 }
4561
/*
 * mac80211 callback: configure EDCA/QoS parameters for one TX queue.
 *
 * Programs both the AC parameters (cw_min/cw_max/aifs/txop) and the
 * TID configuration (PS scheme, ACK policy) into the firmware.
 * Returns 0 without touching the FW when the vif is not yet
 * initialized - the parameters are applied later during init.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	/* U-APSD queues use trigger-based delivery in power save */
	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4611
/*
 * mac80211 callback: read the current TSF from the firmware.
 *
 * Returns ULLONG_MAX when the HW is off, cannot be woken, or the ACX
 * query fails.
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{

	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
4643
4644 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4645 struct survey_info *survey)
4646 {
4647 struct ieee80211_conf *conf = &hw->conf;
4648
4649 if (idx != 0)
4650 return -ENOENT;
4651
4652 survey->channel = conf->chandef.chan;
4653 survey->filled = 0;
4654 return 0;
4655 }
4656
4657 static int wl1271_allocate_sta(struct wl1271 *wl,
4658 struct wl12xx_vif *wlvif,
4659 struct ieee80211_sta *sta)
4660 {
4661 struct wl1271_station *wl_sta;
4662 int ret;
4663
4664
4665 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4666 wl1271_warning("could not allocate HLID - too much stations");
4667 return -EBUSY;
4668 }
4669
4670 wl_sta = (struct wl1271_station *)sta->drv_priv;
4671 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4672 if (ret < 0) {
4673 wl1271_warning("could not allocate HLID - too many links");
4674 return -EBUSY;
4675 }
4676
4677 /* use the previous security seq, if this is a recovery/resume */
4678 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4679
4680 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4681 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4682 wl->active_sta_count++;
4683 return 0;
4684 }
4685
/*
 * Release the link (HLID) of an AP-mode station.
 *
 * Clears the hlid from the vif's map and the AP power-save bitmaps,
 * saves the link's TX security sequence counter back into the
 * station's drv_priv (so it survives recovery/suspend), frees the
 * link, and re-arms the TX watchdog when the last station goes away.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct wl1271_station *wl_sta;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of ieee80211_sta,
	 * in case of recovery/suspend
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (sta) {
		wl_sta = (void *)sta->drv_priv;
		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

		/*
		 * increment the initial seq number on recovery to account for
		 * transmitted packets that we haven't yet got in the FW status
		 */
		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
			wl_sta->total_freed_pkts +=
					WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
	rcu_read_unlock();

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4729
4730 static int wl12xx_sta_add(struct wl1271 *wl,
4731 struct wl12xx_vif *wlvif,
4732 struct ieee80211_sta *sta)
4733 {
4734 struct wl1271_station *wl_sta;
4735 int ret = 0;
4736 u8 hlid;
4737
4738 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4739
4740 ret = wl1271_allocate_sta(wl, wlvif, sta);
4741 if (ret < 0)
4742 return ret;
4743
4744 wl_sta = (struct wl1271_station *)sta->drv_priv;
4745 hlid = wl_sta->hlid;
4746
4747 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4748 if (ret < 0)
4749 wl1271_free_sta(wl, wlvif, hlid);
4750
4751 return ret;
4752 }
4753
4754 static int wl12xx_sta_remove(struct wl1271 *wl,
4755 struct wl12xx_vif *wlvif,
4756 struct ieee80211_sta *sta)
4757 {
4758 struct wl1271_station *wl_sta;
4759 int ret = 0, id;
4760
4761 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4762
4763 wl_sta = (struct wl1271_station *)sta->drv_priv;
4764 id = wl_sta->hlid;
4765 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4766 return -EINVAL;
4767
4768 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4769 if (ret < 0)
4770 return ret;
4771
4772 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4773 return ret;
4774 }
4775
/*
 * Start a ROC (remain-on-channel) on the vif's own role, but only if
 * no ROC is currently active anywhere on the device.
 */
static void wlcore_roc_if_possible(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif)
{
	/* bail out if any role already has an active ROC */
	if (find_first_bit(wl->roc_map,
			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
		return;

	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
		return;

	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
4788
/*
 * Track stations that are in the middle of connecting (AP mode).
 *
 * While at least one station is connecting (or an auth reply is
 * pending), a ROC keeps the AP on-channel so the exchange can
 * complete; the ROC is cancelled once the last such entity is done.
 *
 * when wl_sta is NULL, we treat this call as if coming from a
 * pending auth reply.
 * wl->mutex must be taken and the FW must be awake when the call
 * takes place.
 */
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct wl1271_station *wl_sta, bool in_conn)
{
	if (in_conn) {
		/* already tracked as connecting - nothing to do */
		if (WARN_ON(wl_sta && wl_sta->in_connection))
			return;

		/* first connecting entity: try to start the ROC */
		if (!wlvif->ap_pending_auth_reply &&
		    !wlvif->inconn_count)
			wlcore_roc_if_possible(wl, wlvif);

		if (wl_sta) {
			wl_sta->in_connection = true;
			wlvif->inconn_count++;
		} else {
			wlvif->ap_pending_auth_reply = true;
		}
	} else {
		/* station was never tracked as connecting - ignore */
		if (wl_sta && !wl_sta->in_connection)
			return;

		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
			return;

		if (WARN_ON(wl_sta && !wlvif->inconn_count))
			return;

		if (wl_sta) {
			wl_sta->in_connection = false;
			wlvif->inconn_count--;
		} else {
			wlvif->ap_pending_auth_reply = false;
		}

		/* last connecting entity gone - cancel the ROC */
		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
		    test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}
}
4834
/*
 * Apply a mac80211 station state transition to the firmware.
 *
 * AP mode: allocates/removes the peer's HLID, authorizes the peer and
 * maintains the in-connection ROC bookkeeping.
 * STA mode: drives the authorized flag and the connection-time ROC.
 *
 * Called with wl->mutex held and the FW awake.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* keep the AP on-channel while the station connects */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* connection complete - drop the in-connection tracking */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* de-authorize on the way back down to ASSOC */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* start of a connection attempt: ROC unless one is active */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
4920
/*
 * mac80211 callback: station state transition notification.
 *
 * Wraps wl12xx_update_sta_state() with the usual mutex/ELP handling.
 * Errors on downgrade transitions (new_state < old_state) are
 * suppressed because mac80211 cannot handle a failed teardown.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/* downgrades must always be reported as successful */
	if (new_state < old_state)
		return 0;
	return ret;
}
4954
/*
 * mac80211 callback: BlockAck (AMPDU) session management.
 *
 * Only RX BA sessions are driven by the host; the per-link ba_bitmap
 * and the global ba_rx_session_count track the active sessions.  TX
 * BA sessions are handled autonomously by the firmware, so all TX_*
 * actions return -EINVAL.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the link the BA session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * TX BA (initiator) sessions are managed by the FW on its own;
	 * all TX AMPDU actions deliberately fall through to -EINVAL.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5074
/*
 * mac80211 callback: restrict the legacy bitrates used by a vif.
 *
 * The per-band masks are always cached on the vif; the FW rate
 * policies are reprogrammed immediately only for a not-yet-associated
 * STA vif (otherwise the cached masks take effect on the next rate
 * update).
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* cache the mask for every band, even while the HW is off */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5117
/*
 * mac80211 callback: channel switch announcement handling.
 *
 * Forwards the switch to the HW-specific op for every STA vif and
 * arms a per-vif watchdog that reports failure if the switch has not
 * completed ~5s after the announced switch time.  If the HW is off,
 * the switch is reported as failed immediately.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		/* HW gone - fail the switch for every STA vif */
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
			usecs_to_jiffies(delay_usec) +
			msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5169
/*
 * mac80211 callback: flush pending frames.  The queues/drop arguments
 * are ignored - the whole TX path is flushed unconditionally.
 */
static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct wl1271 *wl = hw->priv;

	wl1271_tx_flush(wl);
}
5176
/*
 * mac80211 callback: remain-on-channel request.
 *
 * Starts the device role on the requested channel and schedules
 * roc_complete_work to end the ROC after 'duration' ms.  Only one ROC
 * may be active device-wide; -EBUSY is returned otherwise.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* arm the timer that ends the ROC */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5222
5223 static int __wlcore_roc_completed(struct wl1271 *wl)
5224 {
5225 struct wl12xx_vif *wlvif;
5226 int ret;
5227
5228 /* already completed */
5229 if (unlikely(!wl->roc_vif))
5230 return 0;
5231
5232 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5233
5234 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5235 return -EBUSY;
5236
5237 ret = wl12xx_stop_dev(wl, wlvif);
5238 if (ret < 0)
5239 return ret;
5240
5241 wl->roc_vif = NULL;
5242
5243 return 0;
5244 }
5245
/*
 * End the active ROC with the usual locking/power handling: wake the
 * FW, tear the ROC down, and let the FW sleep again.  Returns -EBUSY
 * when the HW is not on.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5271
5272 static void wlcore_roc_complete_work(struct work_struct *work)
5273 {
5274 struct delayed_work *dwork;
5275 struct wl1271 *wl;
5276 int ret;
5277
5278 dwork = container_of(work, struct delayed_work, work);
5279 wl = container_of(dwork, struct wl1271, roc_complete_work);
5280
5281 ret = wlcore_roc_completed(wl);
5282 if (!ret)
5283 ieee80211_remain_on_channel_expired(wl->hw);
5284 }
5285
/* mac80211 callback: cancel an in-flight remain-on-channel period */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5304
/*
 * mac80211 callback: per-station rate-control change notification,
 * forwarded straight to the chip-specific implementation.
 */
static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;

	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
}
5315
5316 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5317 struct ieee80211_vif *vif,
5318 struct ieee80211_sta *sta,
5319 s8 *rssi_dbm)
5320 {
5321 struct wl1271 *wl = hw->priv;
5322 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5323 int ret = 0;
5324
5325 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5326
5327 mutex_lock(&wl->mutex);
5328
5329 if (unlikely(wl->state != WLCORE_STATE_ON))
5330 goto out;
5331
5332 ret = wl1271_ps_elp_wakeup(wl);
5333 if (ret < 0)
5334 goto out_sleep;
5335
5336 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5337 if (ret < 0)
5338 goto out_sleep;
5339
5340 out_sleep:
5341 wl1271_ps_elp_sleep(wl);
5342
5343 out:
5344 mutex_unlock(&wl->mutex);
5345
5346 return ret;
5347 }
5348
5349 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5350 {
5351 struct wl1271 *wl = hw->priv;
5352 bool ret = false;
5353
5354 mutex_lock(&wl->mutex);
5355
5356 if (unlikely(wl->state != WLCORE_STATE_ON))
5357 goto out;
5358
5359 /* packets are considered pending if in the TX queue or the FW */
5360 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5361 out:
5362 mutex_unlock(&wl->mutex);
5363
5364 return ret;
5365 }
5366
/* can't be const, mac80211 writes to this */
/* 2.4GHz rate table: CCK rates first (short-preamble capable except
 * 1 Mbps), then the OFDM rates; hw_value is the FW rate bit */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5409
/* can't be const, mac80211 writes to this */
/* 2.4GHz channels 1-14; regulatory restrictions are applied by
 * cfg80211 on top of these entries */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5427
/* can't be const, mac80211 writes to this */
/* 2.4GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5435
/* 5 GHz data rates for WL1273 */
/* OFDM only - no CCK rates exist on 5GHz */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5463
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5498
/* 5GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5505
/* mac80211 callbacks shared by all wlcore-based (wl12xx/wl18xx) chips */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5549
5550
5551 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5552 {
5553 u8 idx;
5554
5555 BUG_ON(band >= 2);
5556
5557 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5558 wl1271_error("Illegal RX rate from HW: %d", rate);
5559 return 0;
5560 }
5561
5562 idx = wl->band_rate_to_idx[band][rate];
5563 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5564 wl1271_error("Unsupported RX rate from HW: %d", rate);
5565 return 0;
5566 }
5567
5568 return idx;
5569 }
5570
/*
 * Derive the wiphy's MAC address list from a base OUI/NIC pair.
 *
 * Consecutive NIC values fill the first wl->num_mac_addr entries.  If
 * the HW provides fewer addresses than WLCORE_NUM_MAC_ADDRESSES, the
 * last slot reuses the first address with the locally-administered
 * bit set.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++) {
		/* bytes 0-2: OUI, bytes 3-5: incrementing NIC */
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[2] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
5609
/*
 * Power the chip on briefly to read its identity: chip id, PG version
 * and (when the chip-specific op exists) the fused MAC address.  The
 * chip is powered off again before returning.
 */
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
	if (ret < 0)
		goto out;

	/* default to zero - get_mac() may overwrite from the fuse */
	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
	if (ret < 0)
		goto out;

	if (wl->ops->get_mac)
		ret = wl->ops->get_mac(wl);

out:
	wl1271_power_off(wl);
	return ret;
}
5636
/*
 * Register the hw with mac80211 (idempotent).
 *
 * The base MAC address is taken from the NVS file when present there,
 * otherwise derived from the fuse registers (the fuse holds the
 * BD_ADDR; the WLAN addresses are the ones following it).
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	/* bytes 3-6 and 10-11 of the NVS hold the MAC address */
	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
5682
/*
 * Counterpart of wl1271_register_hw(): stop PLT mode if it is active,
 * then unregister the hw from mac80211.  Safe to call multiple times;
 * mac80211_registered gates re-registration.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	/* leave production-line-test mode before mac80211 goes away */
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
5692
/*
 * Interface concurrency limits advertised to cfg80211: up to three
 * station interfaces plus at most one AP / P2P-GO / P2P-client
 * interface at the same time.
 */
static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			 BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};
5705
/*
 * Allowed interface combinations.  Deliberately NOT const:
 * num_different_channels is filled in at init time from the per-chip
 * wl->num_channels value (see wl1271_init_ieee80211()).
 */
static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
	{
		.max_interfaces = 3,
		.limits = wlcore_iface_limits,
		.n_limits = ARRAY_SIZE(wlcore_iface_limits),
	},
};
5714
/*
 * Describe the device's capabilities to mac80211/cfg80211: HW flags,
 * cipher suites, supported interface types and combinations, scan
 * limits, band/channel tables and the Tx queue layout.  Called once
 * after the chip has been identified, before wl1271_register_hw().
 *
 * Returns 0 (kept non-void for call-chain consistency).
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra per-frame space for TKIP */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_SUPPORTS_UAPSD |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_QUEUE_CONTROL;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 5000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
	wl->hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(wlcore_iface_combinations);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	/* per-station / per-vif private data carved out by mac80211 */
	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5849
/*
 * Allocate and minimally initialize a wlcore device instance.
 *
 * @priv_size:     size of the chip-specific private area (wl->priv),
 *                 allocated separately from the ieee80211_hw priv.
 * @aggr_buf_size: size of the Tx/Rx aggregation buffer (page-rounded).
 * @mbox_size:     size of the event mailbox buffer (DMA-capable).
 *
 * Sets up queues, work items, locks and scratch buffers, but touches no
 * hardware; chip identification and registration happen later in
 * wlcore_nvs_cb().  Returns the ieee80211_hw on success or an ERR_PTR.
 * Counterpart: wlcore_free_hw().
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	BUILD_BUG_ON(AP_MAX_STATIONS > WLCORE_MAX_LINKS);

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	/* chip-specific private data lives in its own allocation */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	/* freezable so in-flight work is quiesced across suspend */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	/* NOTE(review): wl->num_tx_desc is still 0 at this point (it is
	 * set later by wl->ops->setup() in wlcore_nvs_cb()), so this
	 * loop is effectively a no-op; the memset of *wl above already
	 * cleared tx_frames.  Kept for safety — confirm before removing. */
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	/* aggregation buffer must be physically contiguous pages */
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* event mailbox is read via DMA-capable transfers */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	/* scratch buffer for 32-bit register reads */
	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
6004
/*
 * Release everything owned by the wl1271 instance: the buffers from
 * wlcore_alloc_hw() plus anything acquired since (firmware image, NVS
 * data, FW status buffers, sysfs entries).  Must be called after the hw
 * has been unregistered from mac80211.  Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware was vmalloc'ed (see wl12xx_fetch_firmware) */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
6040
#ifdef CONFIG_PM
/*
 * Wake-on-WLAN capabilities advertised to cfg80211; installed on the
 * wiphy only when the platform keeps power in suspend (see
 * wlcore_nvs_cb()).
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6049
/*
 * Hard-IRQ handler used for edge-triggered interrupt platforms: it
 * only kicks the threaded handler (wlcore_irq); all real work happens
 * in thread context.
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6054
6055 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6056 {
6057 struct wl1271 *wl = context;
6058 struct platform_device *pdev = wl->pdev;
6059 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6060 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6061 unsigned long irqflags;
6062 int ret;
6063 irq_handler_t hardirq_fn = NULL;
6064
6065 if (fw) {
6066 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6067 if (!wl->nvs) {
6068 wl1271_error("Could not allocate nvs data");
6069 goto out;
6070 }
6071 wl->nvs_len = fw->size;
6072 } else {
6073 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6074 WL12XX_NVS_NAME);
6075 wl->nvs = NULL;
6076 wl->nvs_len = 0;
6077 }
6078
6079 ret = wl->ops->setup(wl);
6080 if (ret < 0)
6081 goto out_free_nvs;
6082
6083 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6084
6085 /* adjust some runtime configuration parameters */
6086 wlcore_adjust_conf(wl);
6087
6088 wl->irq = platform_get_irq(pdev, 0);
6089 wl->platform_quirks = pdata->platform_quirks;
6090 wl->if_ops = pdev_data->if_ops;
6091
6092 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6093 irqflags = IRQF_TRIGGER_RISING;
6094 hardirq_fn = wlcore_hardirq;
6095 } else {
6096 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6097 }
6098
6099 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6100 irqflags, pdev->name, wl);
6101 if (ret < 0) {
6102 wl1271_error("request_irq() failed: %d", ret);
6103 goto out_free_nvs;
6104 }
6105
6106 #ifdef CONFIG_PM
6107 ret = enable_irq_wake(wl->irq);
6108 if (!ret) {
6109 wl->irq_wake_enabled = true;
6110 device_init_wakeup(wl->dev, 1);
6111 if (pdata->pwr_in_suspend)
6112 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6113 }
6114 #endif
6115 disable_irq(wl->irq);
6116
6117 ret = wl12xx_get_hw_info(wl);
6118 if (ret < 0) {
6119 wl1271_error("couldn't get hw info");
6120 goto out_irq;
6121 }
6122
6123 ret = wl->ops->identify_chip(wl);
6124 if (ret < 0)
6125 goto out_irq;
6126
6127 ret = wl1271_init_ieee80211(wl);
6128 if (ret)
6129 goto out_irq;
6130
6131 ret = wl1271_register_hw(wl);
6132 if (ret)
6133 goto out_irq;
6134
6135 ret = wlcore_sysfs_init(wl);
6136 if (ret)
6137 goto out_unreg;
6138
6139 wl->initialized = true;
6140 goto out;
6141
6142 out_unreg:
6143 wl1271_unregister_hw(wl);
6144
6145 out_irq:
6146 free_irq(wl->irq, wl);
6147
6148 out_free_nvs:
6149 kfree(wl->nvs);
6150
6151 out:
6152 release_firmware(fw);
6153 complete_all(&wl->nvs_loading_complete);
6154 }
6155
/*
 * Common probe entry point for the wl12xx/wl18xx platform drivers.
 * Validates that the chip driver filled in its ops/partition table,
 * then kicks off an asynchronous request for the NVS file; the rest of
 * the bring-up continues in wlcore_nvs_cb().
 *
 * Returns 0 if the firmware request was queued, or a negative error.
 * On request failure the nvs_loading_complete completion is signalled
 * so a subsequent wlcore_remove() does not block.
 */
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	int ret;

	if (!wl->ops || !wl->ptable)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
				      wl, wlcore_nvs_cb);
	if (ret < 0) {
		wl1271_error("request_firmware_nowait failed: %d", ret);
		complete_all(&wl->nvs_loading_complete);
	}

	return ret;
}
6178
/*
 * Common remove entry point: waits for the asynchronous NVS/bring-up
 * path (wlcore_nvs_cb) to finish, then tears down wakeup/IRQ state,
 * unregisters from mac80211 and frees the instance.  Always returns 0.
 *
 * NOTE(review): when bring-up failed (!wl->initialized) we return
 * without calling wlcore_free_hw(); presumably the bus driver owns
 * that cleanup in this case — confirm against the sdio/spi glue.
 */
int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	/* ensure wlcore_nvs_cb() is not still running */
	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
6198
/* runtime-tunable debug verbosity bitmask (see debug.h for bits) */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* -1 means "use the per-chip default" for the three params below */
module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);
This page took 0.168832 seconds and 6 git commands to generate.