wlcore: add ability to reduce FW interrupts during suspend
[deliverable/linux.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44
#define WL1271_BOOT_RETRIES 3

/*
 * Optional module parameters, applied over the compiled-in defaults by
 * wlcore_adjust_conf(). A value of -1 (or NULL for fwlog_param) means
 * "keep the default from the conf structure".
 */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;

/* forward declarations for helpers defined later in this file */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59 {
60 int ret;
61
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63 return -EINVAL;
64
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66 return 0;
67
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69 return 0;
70
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72 if (ret < 0)
73 return ret;
74
75 wl1271_info("Association completed.");
76 return 0;
77 }
78
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
81 {
82 struct ieee80211_supported_band *band;
83 struct ieee80211_channel *ch;
84 int i;
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv;
87
88 band = wiphy->bands[IEEE80211_BAND_5GHZ];
89 for (i = 0; i < band->n_channels; i++) {
90 ch = &band->channels[i];
91 if (ch->flags & IEEE80211_CHAN_DISABLED)
92 continue;
93
94 if (ch->flags & IEEE80211_CHAN_RADAR)
95 ch->flags |= IEEE80211_CHAN_NO_IR;
96
97 }
98
99 wlcore_regdomain_config(wl);
100 }
101
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
103 bool enable)
104 {
105 int ret = 0;
106
107 /* we should hold wl->mutex */
108 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
109 if (ret < 0)
110 goto out;
111
112 if (enable)
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 else
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
116 out:
117 return ret;
118 }
119
120 /*
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
123 */
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
125 {
126 int ret = 0;
127 int period = wl->conf.rx_streaming.interval;
128
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
131 goto out;
132
133 /* reconfigure/disable according to new streaming_period */
134 if (period &&
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 (wl->conf.rx_streaming.always ||
137 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 else {
140 ret = wl1271_set_rx_streaming(wl, wlvif, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif->rx_streaming_timer);
143 }
144 out:
145 return ret;
146 }
147
/*
 * Worker that turns rx streaming on for a vif and arms the inactivity
 * timer that will later disable it again.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/*
	 * Skip when streaming is already running, we're not associated,
	 * or config limits streaming to soft-gemini coex (and it's off).
	 */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* an interval of 0 means rx streaming is disabled */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
183
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
185 {
186 int ret;
187 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 rx_streaming_disable_work);
189 struct wl1271 *wl = wlvif->wl;
190
191 mutex_lock(&wl->mutex);
192
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
194 goto out;
195
196 ret = wl1271_ps_elp_wakeup(wl);
197 if (ret < 0)
198 goto out;
199
200 ret = wl1271_set_rx_streaming(wl, wlvif, false);
201 if (ret)
202 goto out_sleep;
203
204 out_sleep:
205 wl1271_ps_elp_sleep(wl);
206 out:
207 mutex_unlock(&wl->mutex);
208 }
209
210 static void wl1271_rx_streaming_timer(unsigned long data)
211 {
212 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 struct wl1271 *wl = wlvif->wl;
214 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 }
216
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 {
220 /* if the watchdog is not armed, don't do anything */
221 if (wl->tx_allocated_blocks == 0)
222 return;
223
224 cancel_delayed_work(&wl->tx_watchdog_work);
225 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 }
228
/*
 * Worker that propagates a station rate-control update to the lower
 * driver / FW, with the chip awake.
 */
static void wlcore_rc_update_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rc_update_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* the chip may be mid-recovery or already stopped */
	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wlcore_hw_sta_rc_update(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
251
/*
 * TX watchdog: runs when FW-allocated TX blocks haven't been freed for
 * tx_watchdog_timeout ms. Benign causes (ROC in progress, active scan,
 * AP buffering for PS stations) just re-arm the watchdog; otherwise TX
 * is considered stuck and a FW recovery is triggered.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
313
314 static void wlcore_adjust_conf(struct wl1271 *wl)
315 {
316 /* Adjust settings according to optional module parameters */
317
318 /* Firmware Logger params */
319 if (fwlog_mem_blocks != -1) {
320 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
321 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
322 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
323 } else {
324 wl1271_error(
325 "Illegal fwlog_mem_blocks=%d using default %d",
326 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
327 }
328 }
329
330 if (fwlog_param) {
331 if (!strcmp(fwlog_param, "continuous")) {
332 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
333 } else if (!strcmp(fwlog_param, "ondemand")) {
334 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
335 } else if (!strcmp(fwlog_param, "dbgpins")) {
336 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
337 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
338 } else if (!strcmp(fwlog_param, "disable")) {
339 wl->conf.fwlog.mem_blocks = 0;
340 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
341 } else {
342 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
343 }
344 }
345
346 if (bug_on_recovery != -1)
347 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
348
349 if (no_recovery != -1)
350 wl->conf.recovery.no_recovery = (u8) no_recovery;
351 }
352
/*
 * Start or stop host-side (high-level) PS for one AP-mode link, based
 * on the FW's per-link PS bitmap and the number of packets the host has
 * queued toward that link.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
381
/*
 * Refresh the cached FW link-PS bitmap and re-evaluate the PS state of
 * every station link belonging to the given AP interface.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	unsigned long cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = status->link_ps_bitmap;
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		/* log which links toggled PS since the last status read */
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    wl->links[hlid].allocated_pkts);
}
403
/*
 * Read and parse the FW status area. Updates freed-packet and
 * freed-block accounting (with wrap-around handling for the 8-bit and
 * 32-bit FW counters), (re)arms or cancels the TX watchdog, refreshes
 * AP per-link PS state and the host-chipset time offset.
 * Caller holds wl->mutex. Returns 0 or a negative bus error.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* translate the chip-specific raw layout into wl->fw_status */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
512
513 static void wl1271_flush_deferred_work(struct wl1271 *wl)
514 {
515 struct sk_buff *skb;
516
517 /* Pass all received frames to the network stack */
518 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
519 ieee80211_rx_ni(wl->hw, skb);
520
521 /* Return sent skbs to the network stack */
522 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
523 ieee80211_tx_status_ni(wl->hw, skb);
524 }
525
526 static void wl1271_netstack_work(struct work_struct *work)
527 {
528 struct wl1271 *wl =
529 container_of(work, struct wl1271, netstack_work);
530
531 do {
532 wl1271_flush_deferred_work(wl);
533 } while (skb_queue_len(&wl->deferred_rx_queue));
534 }
535
536 #define WL1271_IRQ_MAX_LOOPS 256
537
/*
 * Main interrupt-processing loop; called with wl->mutex held and
 * returns a negative error to request a FW recovery. Iterates up to
 * WL1271_IRQ_MAX_LOOPS times (only once for edge-triggered IRQ
 * platforms), dispatching data, event and watchdog interrupts.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_atomic();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no interrupt cause left - we're done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
666
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the
 * work (and raises a PM wakeup event) while the system is suspended,
 * and otherwise processes the interrupt under wl->mutex, queueing a
 * recovery on failure.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
714
/*
 * Result of iterating the active interfaces: how many there are, and
 * whether the interface of interest (cur_vif) was among them.
 */
struct vif_counter_data {
	u8 counter;		/* number of active interfaces seen */

	struct ieee80211_vif *cur_vif;	/* interface to look for */
	bool cur_vif_running;	/* set if cur_vif was encountered */
};
721
722 static void wl12xx_vif_count_iter(void *data, u8 *mac,
723 struct ieee80211_vif *vif)
724 {
725 struct vif_counter_data *counter = data;
726
727 counter->counter++;
728 if (counter->cur_vif == vif)
729 counter->cur_vif_running = true;
730 }
731
732 /* caller must not hold wl->mutex, as it might deadlock */
733 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
734 struct ieee80211_vif *cur_vif,
735 struct vif_counter_data *data)
736 {
737 memset(data, 0, sizeof(*data));
738 data->cur_vif = cur_vif;
739
740 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
741 wl12xx_vif_count_iter, data);
742 }
743
/*
 * Load the firmware image appropriate for the current mode (PLT vs.
 * normal, single-role vs. multi-role) into wl->fw, unless that image
 * type is already loaded. Returns 0 on success or a negative error.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the right image is already in memory - nothing to do */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the boot code transfers the image in 32-bit words */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
806
/*
 * Kick off the recovery worker, unless a recovery is already in flight
 * (state != ON). Wakes the chip and masks interrupts so no further FW
 * events race with the restart.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
				  &wl->flags));

		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		wl1271_ps_elp_wakeup(wl);
		wlcore_disable_interrupts_nosync(wl);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
821
822 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
823 {
824 size_t len;
825
826 /* Make sure we have enough room */
827 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
828
829 /* Fill the FW log file, consumed by the sysfs fwlog entry */
830 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
831 wl->fwlog_size += len;
832
833 return len;
834 }
835
/*
 * On a FW panic, walk the linked list of FW log memory blocks and copy
 * their contents into the host-side fwlog buffer, waking any sysfs
 * readers when done. Temporarily remaps the chip partition window to
 * each block's address and restores the previous partition on exit.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status);
	if (ret < 0)
		goto out;

	addr = wl->fw_status->log_start_addr;
	if (!addr)
		goto out;

	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size  = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	/* wake readers blocked on the sysfs fwlog entry */
	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	wlcore_set_partition(wl, &old_part);
}
924
/*
 * Snapshot the link's cumulative freed-packets counter into the
 * station's private data so TX sequence numbers remain monotonic across
 * reconnection/recovery. Adds padding on recovery for in-flight frames.
 */
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   u8 hlid, struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;

	wl_sta = (void *)sta->drv_priv;
	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

	/*
	 * increment the initial seq number on recovery to account for
	 * transmitted packets that we haven't yet got in the FW status
	 */
	if (wlvif->encryption_type == KEY_GEM)
		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;

	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wl_sta->total_freed_pkts += sqn_recovery_padding;
}
944
/*
 * Like wlcore_save_freed_pkts(), but looks the station up by MAC
 * address on the given vif. Silently does nothing if the station is
 * not found.
 */
static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, const u8 *addr)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
		    is_zero_ether_addr(addr)))
		return;

	/* mac80211 station lookups must be done under RCU */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	if (sta)
		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
	rcu_read_unlock();
}
962
963 static void wlcore_print_recovery(struct wl1271 *wl)
964 {
965 u32 pc = 0;
966 u32 hint_sts = 0;
967 int ret;
968
969 wl1271_info("Hardware recovery in progress. FW ver: %s",
970 wl->chip.fw_ver_str);
971
972 /* change partitions momentarily so we can read the FW pc */
973 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
974 if (ret < 0)
975 return;
976
977 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
978 if (ret < 0)
979 return;
980
981 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
982 if (ret < 0)
983 return;
984
985 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
986 pc, hint_sts, ++wl->recovery_count);
987
988 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
989 }
990
991
/*
 * Worker performing a full FW recovery: dump diagnostics, remove all
 * interfaces (preserving per-station TX sequence counters), stop the
 * hardware and ask mac80211 to restart it. Skipped entirely in PLT
 * mode or when already off.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	/* only dump diagnostics for unexpected (not driver-initiated)
	 * recoveries */
	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);

		/* keep TX sequence numbers monotonic across the restart */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
						    vif->bss_conf.bssid);
		}

		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
1051
/* Wake the FW out of ELP by writing the wakeup bit to the ELP control
 * register. Returns the bus-write result. */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
1056
1057 static int wl1271_setup(struct wl1271 *wl)
1058 {
1059 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1060 if (!wl->raw_fw_status)
1061 goto err;
1062
1063 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1064 if (!wl->fw_status)
1065 goto err;
1066
1067 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1068 if (!wl->tx_res_if)
1069 goto err;
1070
1071 return 0;
1072 err:
1073 kfree(wl->fw_status);
1074 kfree(wl->raw_fw_status);
1075 return -ENOMEM;
1076 }
1077
/*
 * Power the chip on, reset and initialize the bus, select the boot
 * partition and wake the FW out of ELP. On failure after power-on the
 * chip is powered off again before returning.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle time before toggling power */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1106
/*
 * Bring the chip to a state where the firmware can be booted: power it
 * on, configure the bus block size, allocate the status buffers and
 * fetch the appropriate firmware image (PLT or normal).
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1142
1143 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1144 {
1145 int retries = WL1271_BOOT_RETRIES;
1146 struct wiphy *wiphy = wl->hw->wiphy;
1147
1148 static const char* const PLT_MODE[] = {
1149 "PLT_OFF",
1150 "PLT_ON",
1151 "PLT_FEM_DETECT",
1152 "PLT_CHIP_AWAKE"
1153 };
1154
1155 int ret;
1156
1157 mutex_lock(&wl->mutex);
1158
1159 wl1271_notice("power up");
1160
1161 if (wl->state != WLCORE_STATE_OFF) {
1162 wl1271_error("cannot go into PLT state because not "
1163 "in off state: %d", wl->state);
1164 ret = -EBUSY;
1165 goto out;
1166 }
1167
1168 /* Indicate to lower levels that we are now in PLT mode */
1169 wl->plt = true;
1170 wl->plt_mode = plt_mode;
1171
1172 while (retries) {
1173 retries--;
1174 ret = wl12xx_chip_wakeup(wl, true);
1175 if (ret < 0)
1176 goto power_off;
1177
1178 if (plt_mode != PLT_CHIP_AWAKE) {
1179 ret = wl->ops->plt_init(wl);
1180 if (ret < 0)
1181 goto power_off;
1182 }
1183
1184 wl->state = WLCORE_STATE_ON;
1185 wl1271_notice("firmware booted in PLT mode %s (%s)",
1186 PLT_MODE[plt_mode],
1187 wl->chip.fw_ver_str);
1188
1189 /* update hw/fw version info in wiphy struct */
1190 wiphy->hw_version = wl->chip.id;
1191 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1192 sizeof(wiphy->fw_version));
1193
1194 goto out;
1195
1196 power_off:
1197 wl1271_power_off(wl);
1198 }
1199
1200 wl->plt = false;
1201 wl->plt_mode = PLT_OFF;
1202
1203 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1204 WL1271_BOOT_RETRIES);
1205 out:
1206 mutex_unlock(&wl->mutex);
1207
1208 return ret;
1209 }
1210
/*
 * Power down a chip that was booted in PLT mode.
 *
 * Returns 0 on success, -EBUSY if the device is not currently in PLT.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/*
	 * drain all pending/deferred work with the mutex released, so the
	 * cancelled work functions may take it themselves without deadlock
	 */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1261
/*
 * mac80211 TX entry point: resolve the FW link (hlid) and AC queue for
 * the frame and enqueue the skb on the matching per-link queue.  Actual
 * delivery to the FW happens later from tx_work.  Only the wl_lock
 * spinlock is taken here - never the mutex.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* per-chip and per-vif accounting, both under wl_lock */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1334
/*
 * Queue the preallocated dummy packet (wl->dummy_packet) towards the FW.
 * Skips when one is already pending; when the FW TX path is not busy
 * the TX work is run synchronously (see comment below).  Returns 0 or a
 * negative error from wlcore_tx_work_locked().
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1361
1362 /*
1363 * The size of the dummy packet should be at least 1400 bytes. However, in
1364 * order to minimize the number of bus transactions, aligning it to 512 bytes
1365 * boundaries could be beneficial, performance wise
1366 */
1367 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1368
1369 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1370 {
1371 struct sk_buff *skb;
1372 struct ieee80211_hdr_3addr *hdr;
1373 unsigned int dummy_packet_size;
1374
1375 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1376 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1377
1378 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1379 if (!skb) {
1380 wl1271_warning("Failed to allocate a dummy packet skb");
1381 return NULL;
1382 }
1383
1384 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1385
1386 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1387 memset(hdr, 0, sizeof(*hdr));
1388 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1389 IEEE80211_STYPE_NULLFUNC |
1390 IEEE80211_FCTL_TODS);
1391
1392 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1393
1394 /* Dummy packets require the TID to be management */
1395 skb->priority = WL1271_TID_MGMT;
1396
1397 /* Initialize all fields that might be used */
1398 skb_set_queue_mapping(skb, 0);
1399 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1400
1401 return skb;
1402 }
1403
1404
1405 #ifdef CONFIG_PM
1406 static int
1407 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1408 {
1409 int num_fields = 0, in_field = 0, fields_size = 0;
1410 int i, pattern_len = 0;
1411
1412 if (!p->mask) {
1413 wl1271_warning("No mask in WoWLAN pattern");
1414 return -EINVAL;
1415 }
1416
1417 /*
1418 * The pattern is broken up into segments of bytes at different offsets
1419 * that need to be checked by the FW filter. Each segment is called
1420 * a field in the FW API. We verify that the total number of fields
1421 * required for this pattern won't exceed FW limits (8)
1422 * as well as the total fields buffer won't exceed the FW limit.
1423 * Note that if there's a pattern which crosses Ethernet/IP header
1424 * boundary a new field is required.
1425 */
1426 for (i = 0; i < p->pattern_len; i++) {
1427 if (test_bit(i, (unsigned long *)p->mask)) {
1428 if (!in_field) {
1429 in_field = 1;
1430 pattern_len = 1;
1431 } else {
1432 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1433 num_fields++;
1434 fields_size += pattern_len +
1435 RX_FILTER_FIELD_OVERHEAD;
1436 pattern_len = 1;
1437 } else
1438 pattern_len++;
1439 }
1440 } else {
1441 if (in_field) {
1442 in_field = 0;
1443 fields_size += pattern_len +
1444 RX_FILTER_FIELD_OVERHEAD;
1445 num_fields++;
1446 }
1447 }
1448 }
1449
1450 if (in_field) {
1451 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1452 num_fields++;
1453 }
1454
1455 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1456 wl1271_warning("RX Filter too complex. Too many segments");
1457 return -EINVAL;
1458 }
1459
1460 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1461 wl1271_warning("RX filter pattern is too big");
1462 return -E2BIG;
1463 }
1464
1465 return 0;
1466 }
1467
/*
 * Allocate a zero-initialized RX filter.  The caller owns the result and
 * must release it with wl1271_rx_filter_free().
 */
struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
{
	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
}
1472
1473 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1474 {
1475 int i;
1476
1477 if (filter == NULL)
1478 return;
1479
1480 for (i = 0; i < filter->num_fields; i++)
1481 kfree(filter->fields[i].pattern);
1482
1483 kfree(filter);
1484 }
1485
1486 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1487 u16 offset, u8 flags,
1488 const u8 *pattern, u8 len)
1489 {
1490 struct wl12xx_rx_filter_field *field;
1491
1492 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1493 wl1271_warning("Max fields per RX filter. can't alloc another");
1494 return -EINVAL;
1495 }
1496
1497 field = &filter->fields[filter->num_fields];
1498
1499 field->pattern = kzalloc(len, GFP_KERNEL);
1500 if (!field->pattern) {
1501 wl1271_warning("Failed to allocate RX filter pattern");
1502 return -ENOMEM;
1503 }
1504
1505 filter->num_fields++;
1506
1507 field->offset = cpu_to_le16(offset);
1508 field->flags = flags;
1509 field->len = len;
1510 memcpy(field->pattern, pattern, len);
1511
1512 return 0;
1513 }
1514
1515 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1516 {
1517 int i, fields_size = 0;
1518
1519 for (i = 0; i < filter->num_fields; i++)
1520 fields_size += filter->fields[i].len +
1521 sizeof(struct wl12xx_rx_filter_field) -
1522 sizeof(u8 *);
1523
1524 return fields_size;
1525 }
1526
/*
 * Serialize a filter's fields into the flat buffer format the FW expects.
 *
 * Each entry consists of the fixed part of struct wl12xx_rx_filter_field
 * with the host-only 'pattern' pointer replaced in-place by the pattern
 * bytes (NOTE(review): this assumes 'pattern' is the last member of the
 * struct, since the copy starts at its offset - confirm against the
 * struct definition).  buf must be at least
 * wl1271_rx_filter_get_fields_size(filter) bytes long.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
				     u8 *buf)
{
	int i;
	struct wl12xx_rx_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* overwrite the pattern-pointer slot with the pattern data */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		buf += sizeof(struct wl12xx_rx_filter_field) -
		       sizeof(u8 *) + field->len;
	}
}
1545
/*
 * Convert a cfg80211 WoWLAN pattern into a FW RX filter: each run of
 * consecutive masked bytes becomes one filter field, and a run crossing
 * the Ethernet/IP header boundary is split in two, since offsets are
 * expressed relative to the header a field lives in.
 *
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* unmasked bytes don't participate in matching - skip them */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of this run of masked bytes */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* split runs that cross the eth/IP header boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* field offsets are relative to their containing header */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1613
/*
 * Program the FW RX-filter engine for WoWLAN.
 *
 * With no usable pattern list (no wow trigger, the "any" trigger, or
 * zero patterns) all filters are cleared and the default action restored
 * to FILTER_SIGNAL (deliver to host).  Otherwise all patterns are
 * validated first, then translated into FW filters, and the default
 * action becomes FILTER_DROP so only matching frames reach the host.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* our copy is no longer needed after rx_filter_enable */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1678
1679 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1680 struct wl12xx_vif *wlvif,
1681 struct cfg80211_wowlan *wow)
1682 {
1683 int ret = 0;
1684
1685 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1686 goto out;
1687
1688 ret = wl1271_configure_wowlan(wl, wow);
1689 if (ret < 0)
1690 goto out;
1691
1692 if ((wl->conf.conn.suspend_wake_up_event ==
1693 wl->conf.conn.wake_up_event) &&
1694 (wl->conf.conn.suspend_listen_interval ==
1695 wl->conf.conn.listen_interval))
1696 goto out;
1697
1698 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1699 wl->conf.conn.suspend_wake_up_event,
1700 wl->conf.conn.suspend_listen_interval);
1701
1702 if (ret < 0)
1703 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1704 out:
1705 return ret;
1706
1707 }
1708
1709 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1710 struct wl12xx_vif *wlvif)
1711 {
1712 int ret = 0;
1713
1714 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1715 goto out;
1716
1717 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1718
1719 out:
1720 return ret;
1721
1722 }
1723
1724 static int wl1271_configure_suspend(struct wl1271 *wl,
1725 struct wl12xx_vif *wlvif,
1726 struct cfg80211_wowlan *wow)
1727 {
1728 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1729 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1730 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1731 return wl1271_configure_suspend_ap(wl, wlvif);
1732 return 0;
1733 }
1734
1735 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1736 {
1737 int ret = 0;
1738 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1739 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1740
1741 if ((!is_ap) && (!is_sta))
1742 return;
1743
1744 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1745 return;
1746
1747 if (is_sta) {
1748 wl1271_configure_wowlan(wl, NULL);
1749
1750 if ((wl->conf.conn.suspend_wake_up_event ==
1751 wl->conf.conn.wake_up_event) &&
1752 (wl->conf.conn.suspend_listen_interval ==
1753 wl->conf.conn.listen_interval))
1754 return;
1755
1756 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1757 wl->conf.conn.wake_up_event,
1758 wl->conf.conn.listen_interval);
1759
1760 if (ret < 0)
1761 wl1271_error("resume: wake up conditions failed: %d",
1762 ret);
1763
1764 } else if (is_ap) {
1765 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1766 }
1767 }
1768
1769 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1770 struct cfg80211_wowlan *wow)
1771 {
1772 struct wl1271 *wl = hw->priv;
1773 struct wl12xx_vif *wlvif;
1774 int ret;
1775
1776 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 WARN_ON(!wow);
1778
1779 /* we want to perform the recovery before suspending */
1780 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1781 wl1271_warning("postponing suspend to perform recovery");
1782 return -EBUSY;
1783 }
1784
1785 wl1271_tx_flush(wl);
1786
1787 mutex_lock(&wl->mutex);
1788
1789 ret = wl1271_ps_elp_wakeup(wl);
1790 if (ret < 0)
1791 return ret;
1792
1793 wl->wow_enabled = true;
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 ret = wl1271_configure_suspend(wl, wlvif, wow);
1796 if (ret < 0) {
1797 mutex_unlock(&wl->mutex);
1798 wl1271_warning("couldn't prepare device to suspend");
1799 return ret;
1800 }
1801 }
1802
1803 /* disable fast link flow control notifications from FW */
1804 ret = wlcore_hw_interrupt_notify(wl, false);
1805 if (ret < 0)
1806 goto out_sleep;
1807
1808 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 ret = wlcore_hw_rx_ba_filter(wl,
1810 !!wl->conf.conn.suspend_rx_ba_activity);
1811 if (ret < 0)
1812 goto out_sleep;
1813
1814 out_sleep:
1815 wl1271_ps_elp_sleep(wl);
1816 mutex_unlock(&wl->mutex);
1817
1818 if (ret < 0) {
1819 wl1271_warning("couldn't prepare device to suspend");
1820 return ret;
1821 }
1822
1823 /* flush any remaining work */
1824 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1825
1826 /*
1827 * disable and re-enable interrupts in order to flush
1828 * the threaded_irq
1829 */
1830 wlcore_disable_interrupts(wl);
1831
1832 /*
1833 * set suspended flag to avoid triggering a new threaded_irq
1834 * work. no need for spinlock as interrupts are disabled.
1835 */
1836 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1837
1838 wlcore_enable_interrupts(wl);
1839 flush_work(&wl->tx_work);
1840 flush_delayed_work(&wl->elp_work);
1841
1842 /*
1843 * Cancel the watchdog even if above tx_flush failed. We will detect
1844 * it on resume anyway.
1845 */
1846 cancel_delayed_work(&wl->tx_watchdog_work);
1847
1848 return 0;
1849 }
1850
/*
 * mac80211 resume callback: undo everything wl1271_op_suspend() set up.
 *
 * Re-enables irq_work enqueuing (clears WL1271_FLAG_SUSPENDED), runs any
 * interrupt work that was postponed while suspended, defers to the
 * recovery worker when a recovery was queued in the meantime, and
 * restores the runtime wake-up/filter configuration on every vif.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		/* balances the disable done in op_suspend */
		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out_sleep;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

	/* re-enable fast link flow control notifications from the FW */
	ret = wlcore_hw_interrupt_notify(wl, true);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl, false);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1932 #endif
1933
/*
 * mac80211 start callback.  Intentionally a no-op: the real HW bring-up
 * is deferred until an interface is added (see comment below).
 */
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */

	return 0;
}
1951
/*
 * Tear down a running chip.  Called with wl->mutex held; returns with it
 * held, but drops it temporarily around the *_sync work cancellations so
 * the cancelled work functions may take it without deadlocking.
 *
 * Disables interrupts, drains all pending work, powers the chip off and
 * resets all per-chip driver state for the next boot.  When the chip is
 * already OFF, only balances a recovery-time interrupt disable.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all per-chip runtime state to boot-time defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* free the per-boot buffers allocated by wl1271_setup() */
	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
2060
/* mac80211 stop callback: take the mutex and run the locked teardown. */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
2073
/*
 * Delayed work fired when a channel switch did not complete in time.
 * Reports the failed switch to mac80211 and asks the FW to abort it.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2110
/*
 * Delayed work that reports a lost connection to mac80211, unless the
 * vif was disassociated (or the chip went down) in the meantime.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2138
/*
 * Delayed work armed after an auth reply.  If a full
 * WLCORE_PEND_AUTH_ROC_TIMEOUT really elapsed with no newer reply, the
 * ROC kept active for the authenticating station is cancelled via
 * wlcore_update_inconn_sta().
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2179
2180 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2181 {
2182 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2183 WL12XX_MAX_RATE_POLICIES);
2184 if (policy >= WL12XX_MAX_RATE_POLICIES)
2185 return -EBUSY;
2186
2187 __set_bit(policy, wl->rate_policies_map);
2188 *idx = policy;
2189 return 0;
2190 }
2191
/*
 * Return a rate-policy slot to the pool and poison the caller's index
 * with the out-of-range value WL12XX_MAX_RATE_POLICIES.
 */
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(*idx, wl->rate_policies_map);
	*idx = WL12XX_MAX_RATE_POLICIES;
}
2200
2201 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2202 {
2203 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2204 WLCORE_MAX_KLV_TEMPLATES);
2205 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2206 return -EBUSY;
2207
2208 __set_bit(policy, wl->klv_templates_map);
2209 *idx = policy;
2210 return 0;
2211 }
2212
/*
 * Return a KLV template slot to the pool and poison the caller's index
 * with the out-of-range value WLCORE_MAX_KLV_TEMPLATES.
 */
static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
		return;

	__clear_bit(*idx, wl->klv_templates_map);
	*idx = WLCORE_MAX_KLV_TEMPLATES;
}
2221
2222 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2223 {
2224 switch (wlvif->bss_type) {
2225 case BSS_TYPE_AP_BSS:
2226 if (wlvif->p2p)
2227 return WL1271_ROLE_P2P_GO;
2228 else
2229 return WL1271_ROLE_AP;
2230
2231 case BSS_TYPE_STA_BSS:
2232 if (wlvif->p2p)
2233 return WL1271_ROLE_P2P_CL;
2234 else
2235 return WL1271_ROLE_STA;
2236
2237 case BSS_TYPE_IBSS:
2238 return WL1271_ROLE_IBSS;
2239
2240 default:
2241 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2242 }
2243 return WL12XX_INVALID_ROLE_TYPE;
2244 }
2245
/*
 * Reset per-vif driver state for a newly added interface and derive
 * bss_type / p2p from the mac80211 interface type.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported interface types.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no role/link allocated yet */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2340
/*
 * Power on the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success, publishes hw/fw version info
 * to the wiphy and moves wl->state to WLCORE_STATE_ON.
 *
 * Called with wl->mutex held; the mutex is dropped temporarily on the
 * irq_disable error path (see the in-line comment below).
 */
static int wl12xx_init_fw(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	bool booted = false;
	struct wiphy *wiphy = wl->hw->wiphy;
	int ret;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, false);
		if (ret < 0)
			goto power_off;

		ret = wl->ops->boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_hw_init(wl);
		if (ret < 0)
			goto irq_disable;

		booted = true;
		break;

irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WLCORE_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wlcore_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
	}

	if (!booted) {
		wl1271_error("firmware boot failed despite %d retries",
			     WL1271_BOOT_RETRIES);
		goto out;
	}

	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	/* NOTE(review): strncpy may leave fw_version unterminated if
	 * fw_ver_str fills the destination — confirm the buffer sizes */
	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

	wl->state = WLCORE_STATE_ON;
out:
	return ret;
}
2409
2410 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2411 {
2412 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2413 }
2414
2415 /*
2416 * Check whether a fw switch (i.e. moving from one loaded
2417 * fw to another) is needed. This function is also responsible
2418 * for updating wl->last_vif_count, so it must be called before
2419 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2420 * will be used).
2421 */
2422 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2423 struct vif_counter_data vif_counter_data,
2424 bool add)
2425 {
2426 enum wl12xx_fw_type current_fw = wl->fw_type;
2427 u8 vif_count = vif_counter_data.counter;
2428
2429 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2430 return false;
2431
2432 /* increase the vif count if this is a new vif */
2433 if (add && !vif_counter_data.cur_vif_running)
2434 vif_count++;
2435
2436 wl->last_vif_count = vif_count;
2437
2438 /* no need for fw change if the device is OFF */
2439 if (wl->state == WLCORE_STATE_OFF)
2440 return false;
2441
2442 /* no need for fw change if a single fw is used */
2443 if (!wl->mr_fw_name)
2444 return false;
2445
2446 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2447 return true;
2448 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2449 return true;
2450
2451 return false;
2452 }
2453
2454 /*
2455 * Enter "forced psm". Make sure the sta is in psm against the ap,
2456 * to make the fw switch a bit more disconnection-persistent.
2457 */
2458 static void wl12xx_force_active_psm(struct wl1271 *wl)
2459 {
2460 struct wl12xx_vif *wlvif;
2461
2462 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2463 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2464 }
2465 }
2466
/* state for wlcore_hw_queue_iter(): hw-queue slots used by active vifs */
struct wlcore_hw_queue_iter_data {
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2474
2475 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2476 struct ieee80211_vif *vif)
2477 {
2478 struct wlcore_hw_queue_iter_data *iter_data = data;
2479
2480 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2481 return;
2482
2483 if (iter_data->cur_running || vif == iter_data->vif) {
2484 iter_data->cur_running = true;
2485 return;
2486 }
2487
2488 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2489 }
2490
/*
 * Allocate a block of NUM_TX_QUEUES mac80211 hw queues for the vif, or
 * reuse the pre-allocated block on resume/recovery. The cab (content-
 * after-beacon) queue is taken from a reserved range past the normal
 * blocks for AP vifs. Returns 0 on success, -EBUSY if no free block.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2541
/*
 * mac80211 add_interface callback: initialize per-vif data, allocate a
 * hw queue block, boot the fw if this is the first vif, and enable the
 * fw role. If a single<->multi role fw switch is needed, a synchronous
 * recovery is triggered instead and 0 is returned (the vif will be
 * re-added by the recovery path).
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/* fw image mismatch: run recovery synchronously and bail out */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2643
/*
 * Tear down a vif: idle any scan it owns, disable its fw roles, free
 * its rate policies / keys / templates, and cancel its works/timers.
 *
 * Called with wl->mutex held; the mutex is dropped (and re-taken)
 * around the work cancellation at the end, since those works take the
 * mutex themselves. May be called twice for the same vif on hardware
 * recovery — guarded by WLVIF_FLAG_INITIALIZED.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	mutex_unlock(&wl->mutex);

	/* the works below take wl->mutex, hence the temporary unlock above */
	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_work_sync(&wlvif->rc_update_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2779
/*
 * mac80211 remove_interface callback: remove the vif (if it is still
 * on the driver's list) and queue a fw-switch recovery if the new vif
 * count requires a different fw image.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2815
/*
 * mac80211 change_interface callback: implemented as remove + re-add.
 * WL1271_FLAG_VIF_CHANGE_IN_PROGRESS suppresses the fw-switch logic in
 * wl12xx_need_fw_change() for the duration of the cycle.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2833
/*
 * Start the sta/ibss role in the firmware (the "join").
 * Returns the role-start command status.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that it clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
2874
2875 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2876 int offset)
2877 {
2878 u8 ssid_len;
2879 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2880 skb->len - offset);
2881
2882 if (!ptr) {
2883 wl1271_error("No SSID in IEs!");
2884 return -ENOENT;
2885 }
2886
2887 ssid_len = ptr[1];
2888 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2889 wl1271_error("SSID is too long!");
2890 return -EINVAL;
2891 }
2892
2893 wlvif->ssid_len = ssid_len;
2894 memcpy(wlvif->ssid, ptr+2, ssid_len);
2895 return 0;
2896 }
2897
2898 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2899 {
2900 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2901 struct sk_buff *skb;
2902 int ieoffset;
2903
2904 /* we currently only support setting the ssid from the ap probe req */
2905 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2906 return -EINVAL;
2907
2908 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2909 if (!skb)
2910 return -EINVAL;
2911
2912 ieoffset = offsetof(struct ieee80211_mgmt,
2913 u.probe_req.variable);
2914 wl1271_ssid_set(wlvif, skb, ieoffset);
2915 dev_kfree_skb(skb);
2916
2917 return 0;
2918 }
2919
2920 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2921 struct ieee80211_bss_conf *bss_conf,
2922 u32 sta_rate_set)
2923 {
2924 int ieoffset;
2925 int ret;
2926
2927 wlvif->aid = bss_conf->aid;
2928 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2929 wlvif->beacon_int = bss_conf->beacon_int;
2930 wlvif->wmm_enabled = bss_conf->qos;
2931
2932 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2933
2934 /*
2935 * with wl1271, we don't need to update the
2936 * beacon_int and dtim_period, because the firmware
2937 * updates it by itself when the first beacon is
2938 * received after a join.
2939 */
2940 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2941 if (ret < 0)
2942 return ret;
2943
2944 /*
2945 * Get a template for hardware connection maintenance
2946 */
2947 dev_kfree_skb(wlvif->probereq);
2948 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2949 wlvif,
2950 NULL);
2951 ieoffset = offsetof(struct ieee80211_mgmt,
2952 u.probe_req.variable);
2953 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2954
2955 /* enable the connection monitoring feature */
2956 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2957 if (ret < 0)
2958 return ret;
2959
2960 /*
2961 * The join command disable the keep-alive mode, shut down its process,
2962 * and also clear the template config, so we need to reset it all after
2963 * the join. The acx_aid starts the keep-alive process, and the order
2964 * of the commands below is relevant.
2965 */
2966 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2967 if (ret < 0)
2968 return ret;
2969
2970 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2971 if (ret < 0)
2972 return ret;
2973
2974 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2975 if (ret < 0)
2976 return ret;
2977
2978 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2979 wlvif->sta.klv_template_id,
2980 ACX_KEEP_ALIVE_TPL_VALID);
2981 if (ret < 0)
2982 return ret;
2983
2984 /*
2985 * The default fw psm configuration is AUTO, while mac80211 default
2986 * setting is off (ACTIVE), so sync the fw with the correct value.
2987 */
2988 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2989 if (ret < 0)
2990 return ret;
2991
2992 if (sta_rate_set) {
2993 wlvif->rate_set =
2994 wl1271_tx_enabled_rates_get(wl,
2995 sta_rate_set,
2996 wlvif->band);
2997 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2998 if (ret < 0)
2999 return ret;
3000 }
3001
3002 return ret;
3003 }
3004
/*
 * Tear down association (sta) / join (ibss) state: drop templates,
 * disable connection monitoring and keep-alive, stop any pending
 * channel switch. Note: the early exits return false (0) although the
 * function's return type is int.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	/*
	 * NOTE(review): unlike the sta check above, this test is not
	 * negated — we bail out when the IBSS *was* joined. This looks
	 * suspicious; confirm the intended polarity before changing it.
	 */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* disable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
3059
/* reset the vif's basic/active rate sets from its per-band bitrate mask */
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
	wlvif->rate_set = wlvif->basic_rate_set;
}
3065
3066 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3067 bool idle)
3068 {
3069 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3070
3071 if (idle == cur_idle)
3072 return;
3073
3074 if (idle) {
3075 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3076 } else {
3077 /* The current firmware only supports sched_scan in idle */
3078 if (wl->sched_vif == wlvif)
3079 wl->ops->sched_scan_stop(wl, wlvif);
3080
3081 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3082 }
3083 }
3084
3085 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3086 struct ieee80211_conf *conf, u32 changed)
3087 {
3088 int ret;
3089
3090 if (conf->power_level != wlvif->power_level) {
3091 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3092 if (ret < 0)
3093 return ret;
3094
3095 wlvif->power_level = conf->power_level;
3096 }
3097
3098 return 0;
3099 }
3100
/*
 * mac80211 config callback: cache the global power level on the device
 * (even when it is OFF) and, when ON, apply the change to every vif.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
			 changed);

	mutex_lock(&wl->mutex);

	/* remember the device-wide power level for vifs added later */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3142
/* multicast filter data built in prepare_multicast() and consumed
 * (then freed) in configure_filter() */
struct wl1271_filter_params {
	bool enabled;
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3148
3149 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3150 struct netdev_hw_addr_list *mc_list)
3151 {
3152 struct wl1271_filter_params *fp;
3153 struct netdev_hw_addr *ha;
3154
3155 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3156 if (!fp) {
3157 wl1271_error("Out of memory setting filters.");
3158 return 0;
3159 }
3160
3161 /* update multicast filtering parameters */
3162 fp->mc_list_length = 0;
3163 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3164 fp->enabled = false;
3165 } else {
3166 fp->enabled = true;
3167 netdev_hw_addr_list_for_each(ha, mc_list) {
3168 memcpy(fp->mc_list[fp->mc_list_length],
3169 ha->addr, ETH_ALEN);
3170 fp->mc_list_length++;
3171 }
3172 }
3173
3174 return (u64)(unsigned long)fp;
3175 }
3176
/* rx filter flag classes the driver acknowledges in configure_filter() */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3183
/*
 * mac80211 configure_filter callback: program the multicast group
 * address table per non-AP vif from the params built in
 * prepare_multicast(). The params (@multicast) are freed here on every
 * path. Other filters cannot be configured in the fw (see comment
 * below).
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3238
3239 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3240 u8 id, u8 key_type, u8 key_size,
3241 const u8 *key, u8 hlid, u32 tx_seq_32,
3242 u16 tx_seq_16)
3243 {
3244 struct wl1271_ap_key *ap_key;
3245 int i;
3246
3247 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3248
3249 if (key_size > MAX_KEY_SIZE)
3250 return -EINVAL;
3251
3252 /*
3253 * Find next free entry in ap_keys. Also check we are not replacing
3254 * an existing key.
3255 */
3256 for (i = 0; i < MAX_NUM_KEYS; i++) {
3257 if (wlvif->ap.recorded_keys[i] == NULL)
3258 break;
3259
3260 if (wlvif->ap.recorded_keys[i]->id == id) {
3261 wl1271_warning("trying to record key replacement");
3262 return -EINVAL;
3263 }
3264 }
3265
3266 if (i == MAX_NUM_KEYS)
3267 return -EBUSY;
3268
3269 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3270 if (!ap_key)
3271 return -ENOMEM;
3272
3273 ap_key->id = id;
3274 ap_key->key_type = key_type;
3275 ap_key->key_size = key_size;
3276 memcpy(ap_key->key, key, key_size);
3277 ap_key->hlid = hlid;
3278 ap_key->tx_seq_32 = tx_seq_32;
3279 ap_key->tx_seq_16 = tx_seq_16;
3280
3281 wlvif->ap.recorded_keys[i] = ap_key;
3282 return 0;
3283 }
3284
/* free all keys recorded while the AP role had not yet been started */
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		kfree(wlvif->ap.recorded_keys[i]);
		wlvif->ap.recorded_keys[i] = NULL;
	}
}
3294
/*
 * Replay keys recorded before AP start into the firmware, and set the
 * default WEP key if any WEP key was installed. All recorded keys are
 * freed on exit, whether or not the fw commands succeeded.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a station map to the bcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3334
/*
 * Program a key into the firmware for AP or STA/IBSS roles.
 *
 * For AP roles before AP-start, keys are only recorded (replayed later
 * by wl1271_ap_init_hwenc()). Several remove requests are silently
 * acknowledged — see the in-line comments — to keep mac80211 happy
 * where the hardware cannot honour them. Returns 0 on success or a
 * negative error code.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3411
3412 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3413 struct ieee80211_vif *vif,
3414 struct ieee80211_sta *sta,
3415 struct ieee80211_key_conf *key_conf)
3416 {
3417 struct wl1271 *wl = hw->priv;
3418 int ret;
3419 bool might_change_spare =
3420 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3421 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3422
3423 if (might_change_spare) {
3424 /*
3425 * stop the queues and flush to ensure the next packets are
3426 * in sync with FW spare block accounting
3427 */
3428 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3429 wl1271_tx_flush(wl);
3430 }
3431
3432 mutex_lock(&wl->mutex);
3433
3434 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3435 ret = -EAGAIN;
3436 goto out_wake_queues;
3437 }
3438
3439 ret = wl1271_ps_elp_wakeup(wl);
3440 if (ret < 0)
3441 goto out_wake_queues;
3442
3443 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3444
3445 wl1271_ps_elp_sleep(wl);
3446
3447 out_wake_queues:
3448 if (might_change_spare)
3449 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3450
3451 mutex_unlock(&wl->mutex);
3452
3453 return ret;
3454 }
3455
3456 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3457 struct ieee80211_vif *vif,
3458 struct ieee80211_sta *sta,
3459 struct ieee80211_key_conf *key_conf)
3460 {
3461 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3462 int ret;
3463 u32 tx_seq_32 = 0;
3464 u16 tx_seq_16 = 0;
3465 u8 key_type;
3466 u8 hlid;
3467
3468 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3469
3470 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3471 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3472 key_conf->cipher, key_conf->keyidx,
3473 key_conf->keylen, key_conf->flags);
3474 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3475
3476 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3477 if (sta) {
3478 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3479 hlid = wl_sta->hlid;
3480 } else {
3481 hlid = wlvif->ap.bcast_hlid;
3482 }
3483 else
3484 hlid = wlvif->sta.hlid;
3485
3486 if (hlid != WL12XX_INVALID_LINK_ID) {
3487 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3488 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3489 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3490 }
3491
3492 switch (key_conf->cipher) {
3493 case WLAN_CIPHER_SUITE_WEP40:
3494 case WLAN_CIPHER_SUITE_WEP104:
3495 key_type = KEY_WEP;
3496
3497 key_conf->hw_key_idx = key_conf->keyidx;
3498 break;
3499 case WLAN_CIPHER_SUITE_TKIP:
3500 key_type = KEY_TKIP;
3501 key_conf->hw_key_idx = key_conf->keyidx;
3502 break;
3503 case WLAN_CIPHER_SUITE_CCMP:
3504 key_type = KEY_AES;
3505 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3506 break;
3507 case WL1271_CIPHER_SUITE_GEM:
3508 key_type = KEY_GEM;
3509 break;
3510 default:
3511 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3512
3513 return -EOPNOTSUPP;
3514 }
3515
3516 switch (cmd) {
3517 case SET_KEY:
3518 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3519 key_conf->keyidx, key_type,
3520 key_conf->keylen, key_conf->key,
3521 tx_seq_32, tx_seq_16, sta);
3522 if (ret < 0) {
3523 wl1271_error("Could not add or replace key");
3524 return ret;
3525 }
3526
3527 /*
3528 * reconfiguring arp response if the unicast (or common)
3529 * encryption key type was changed
3530 */
3531 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3532 (sta || key_type == KEY_WEP) &&
3533 wlvif->encryption_type != key_type) {
3534 wlvif->encryption_type = key_type;
3535 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3536 if (ret < 0) {
3537 wl1271_warning("build arp rsp failed: %d", ret);
3538 return ret;
3539 }
3540 }
3541 break;
3542
3543 case DISABLE_KEY:
3544 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3545 key_conf->keyidx, key_type,
3546 key_conf->keylen, key_conf->key,
3547 0, 0, sta);
3548 if (ret < 0) {
3549 wl1271_error("Could not remove key");
3550 return ret;
3551 }
3552 break;
3553
3554 default:
3555 wl1271_error("Unsupported key cmd 0x%x", cmd);
3556 return -EOPNOTSUPP;
3557 }
3558
3559 return ret;
3560 }
3561 EXPORT_SYMBOL_GPL(wlcore_set_key);
3562
3563 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3564 struct ieee80211_vif *vif,
3565 int key_idx)
3566 {
3567 struct wl1271 *wl = hw->priv;
3568 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3569 int ret;
3570
3571 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3572 key_idx);
3573
3574 /* we don't handle unsetting of default key */
3575 if (key_idx == -1)
3576 return;
3577
3578 mutex_lock(&wl->mutex);
3579
3580 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3581 ret = -EAGAIN;
3582 goto out_unlock;
3583 }
3584
3585 ret = wl1271_ps_elp_wakeup(wl);
3586 if (ret < 0)
3587 goto out_unlock;
3588
3589 wlvif->default_key = key_idx;
3590
3591 /* the default WEP key needs to be configured at least once */
3592 if (wlvif->encryption_type == KEY_WEP) {
3593 ret = wl12xx_cmd_set_default_wep_key(wl,
3594 key_idx,
3595 wlvif->sta.hlid);
3596 if (ret < 0)
3597 goto out_sleep;
3598 }
3599
3600 out_sleep:
3601 wl1271_ps_elp_sleep(wl);
3602
3603 out_unlock:
3604 mutex_unlock(&wl->mutex);
3605 }
3606
/*
 * Push the current regulatory-domain channel configuration to the FW,
 * for chips that require it (WLCORE_QUIRK_REGDOMAIN_CONF).
 *
 * On command failure a full recovery is queued; note that this error
 * path skips wl1271_ps_elp_sleep() - presumably intentional since the
 * recovery work resets the chip anyway (NOTE(review): confirm).
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3633
3634 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3635 struct ieee80211_vif *vif,
3636 struct ieee80211_scan_request *hw_req)
3637 {
3638 struct cfg80211_scan_request *req = &hw_req->req;
3639 struct wl1271 *wl = hw->priv;
3640 int ret;
3641 u8 *ssid = NULL;
3642 size_t len = 0;
3643
3644 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3645
3646 if (req->n_ssids) {
3647 ssid = req->ssids[0].ssid;
3648 len = req->ssids[0].ssid_len;
3649 }
3650
3651 mutex_lock(&wl->mutex);
3652
3653 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3654 /*
3655 * We cannot return -EBUSY here because cfg80211 will expect
3656 * a call to ieee80211_scan_completed if we do - in this case
3657 * there won't be any call.
3658 */
3659 ret = -EAGAIN;
3660 goto out;
3661 }
3662
3663 ret = wl1271_ps_elp_wakeup(wl);
3664 if (ret < 0)
3665 goto out;
3666
3667 /* fail if there is any role in ROC */
3668 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3669 /* don't allow scanning right now */
3670 ret = -EBUSY;
3671 goto out_sleep;
3672 }
3673
3674 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3675 out_sleep:
3676 wl1271_ps_elp_sleep(wl);
3677 out:
3678 mutex_unlock(&wl->mutex);
3679
3680 return ret;
3681 }
3682
/*
 * mac80211 cancel_hw_scan callback: stops an in-flight scan, resets
 * the driver scan state and reports completion (aborted) to mac80211.
 *
 * Ordering notes:
 *  - if the scan already reached DONE, only the bookkeeping reset and
 *    ieee80211_scan_completed() are needed - no FW stop command;
 *  - scan_complete_work is cancelled only after dropping wl->mutex,
 *    since that work takes the same mutex (sync cancel under the lock
 *    would deadlock - NOTE(review): inferred from lock order, confirm).
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	/* 'true' = scan was aborted */
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3729
3730 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3731 struct ieee80211_vif *vif,
3732 struct cfg80211_sched_scan_request *req,
3733 struct ieee80211_scan_ies *ies)
3734 {
3735 struct wl1271 *wl = hw->priv;
3736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 int ret;
3738
3739 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3740
3741 mutex_lock(&wl->mutex);
3742
3743 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3744 ret = -EAGAIN;
3745 goto out;
3746 }
3747
3748 ret = wl1271_ps_elp_wakeup(wl);
3749 if (ret < 0)
3750 goto out;
3751
3752 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3753 if (ret < 0)
3754 goto out_sleep;
3755
3756 wl->sched_vif = wlvif;
3757
3758 out_sleep:
3759 wl1271_ps_elp_sleep(wl);
3760 out:
3761 mutex_unlock(&wl->mutex);
3762 return ret;
3763 }
3764
3765 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3766 struct ieee80211_vif *vif)
3767 {
3768 struct wl1271 *wl = hw->priv;
3769 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 int ret;
3771
3772 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3773
3774 mutex_lock(&wl->mutex);
3775
3776 if (unlikely(wl->state != WLCORE_STATE_ON))
3777 goto out;
3778
3779 ret = wl1271_ps_elp_wakeup(wl);
3780 if (ret < 0)
3781 goto out;
3782
3783 wl->ops->sched_scan_stop(wl, wlvif);
3784
3785 wl1271_ps_elp_sleep(wl);
3786 out:
3787 mutex_unlock(&wl->mutex);
3788
3789 return 0;
3790 }
3791
3792 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3793 {
3794 struct wl1271 *wl = hw->priv;
3795 int ret = 0;
3796
3797 mutex_lock(&wl->mutex);
3798
3799 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3800 ret = -EAGAIN;
3801 goto out;
3802 }
3803
3804 ret = wl1271_ps_elp_wakeup(wl);
3805 if (ret < 0)
3806 goto out;
3807
3808 ret = wl1271_acx_frag_threshold(wl, value);
3809 if (ret < 0)
3810 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3811
3812 wl1271_ps_elp_sleep(wl);
3813
3814 out:
3815 mutex_unlock(&wl->mutex);
3816
3817 return ret;
3818 }
3819
3820 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3821 {
3822 struct wl1271 *wl = hw->priv;
3823 struct wl12xx_vif *wlvif;
3824 int ret = 0;
3825
3826 mutex_lock(&wl->mutex);
3827
3828 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3829 ret = -EAGAIN;
3830 goto out;
3831 }
3832
3833 ret = wl1271_ps_elp_wakeup(wl);
3834 if (ret < 0)
3835 goto out;
3836
3837 wl12xx_for_each_wlvif(wl, wlvif) {
3838 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3839 if (ret < 0)
3840 wl1271_warning("set rts threshold failed: %d", ret);
3841 }
3842 wl1271_ps_elp_sleep(wl);
3843
3844 out:
3845 mutex_unlock(&wl->mutex);
3846
3847 return ret;
3848 }
3849
3850 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3851 {
3852 int len;
3853 const u8 *next, *end = skb->data + skb->len;
3854 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3855 skb->len - ieoffset);
3856 if (!ie)
3857 return;
3858 len = ie[1] + 2;
3859 next = ie + len;
3860 memmove(ie, next, end - next);
3861 skb_trim(skb, skb->len - len);
3862 }
3863
3864 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3865 unsigned int oui, u8 oui_type,
3866 int ieoffset)
3867 {
3868 int len;
3869 const u8 *next, *end = skb->data + skb->len;
3870 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3871 skb->data + ieoffset,
3872 skb->len - ieoffset);
3873 if (!ie)
3874 return;
3875 len = ie[1] + 2;
3876 next = ie + len;
3877 memmove(ie, next, end - next);
3878 skb_trim(skb, skb->len - len);
3879 }
3880
3881 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3882 struct ieee80211_vif *vif)
3883 {
3884 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3885 struct sk_buff *skb;
3886 int ret;
3887
3888 skb = ieee80211_proberesp_get(wl->hw, vif);
3889 if (!skb)
3890 return -EOPNOTSUPP;
3891
3892 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3893 CMD_TEMPL_AP_PROBE_RESPONSE,
3894 skb->data,
3895 skb->len, 0,
3896 rates);
3897 dev_kfree_skb(skb);
3898
3899 if (ret < 0)
3900 goto out;
3901
3902 wl1271_debug(DEBUG_AP, "probe response updated");
3903 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3904
3905 out:
3906 return ret;
3907 }
3908
/*
 * Build and install an AP probe-response template from beacon data.
 *
 * When the vif already carries a valid SSID, the beacon-derived data is
 * used as-is. Otherwise (hidden SSID beacon), the SSID IE inside the
 * template is replaced with the real SSID from bss_conf by splicing:
 * bytes before the SSID IE + rebuilt SSID IE + bytes after it.
 *
 * Returns 0 on success, -EINVAL when the rebuilt template would exceed
 * WL1271_CMD_TEMPL_MAX_SIZE or the beacon has no SSID IE, or a negative
 * error from the template command.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
					       struct ieee80211_vif *vif,
					       u8 *probe_rsp_data,
					       size_t probe_rsp_len,
					       u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* advance past the original (empty/hidden) SSID IE */
	ptr += (ptr[1] + 2);

	/* everything up to the SSID IE is copied verbatim */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append whatever followed the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3966
3967 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3968 struct ieee80211_vif *vif,
3969 struct ieee80211_bss_conf *bss_conf,
3970 u32 changed)
3971 {
3972 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3973 int ret = 0;
3974
3975 if (changed & BSS_CHANGED_ERP_SLOT) {
3976 if (bss_conf->use_short_slot)
3977 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3978 else
3979 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3980 if (ret < 0) {
3981 wl1271_warning("Set slot time failed %d", ret);
3982 goto out;
3983 }
3984 }
3985
3986 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3987 if (bss_conf->use_short_preamble)
3988 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3989 else
3990 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3991 }
3992
3993 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3994 if (bss_conf->use_cts_prot)
3995 ret = wl1271_acx_cts_protect(wl, wlvif,
3996 CTSPROTECT_ENABLE);
3997 else
3998 ret = wl1271_acx_cts_protect(wl, wlvif,
3999 CTSPROTECT_DISABLE);
4000 if (ret < 0) {
4001 wl1271_warning("Set ctsprotect failed %d", ret);
4002 goto out;
4003 }
4004 }
4005
4006 out:
4007 return ret;
4008 }
4009
/*
 * Fetch the current beacon from mac80211, install it as the beacon
 * template, and (unless usermode provided an explicit probe response)
 * derive and install a probe-response template from the same frame by
 * stripping the TIM and P2P IEs and rewriting the frame control field.
 *
 * The beacon skb is owned by this function and freed on every exit
 * path. Returns 0 on success, -EINVAL when no beacon is available, or
 * a negative error from the template commands.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	/* cache the SSID from the beacon on the vif */
	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* remember whether the beacon advertises WMM (vendor IE) */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the beacon frame into a probe response in place */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
							   beacon->data,
							   beacon->len,
							   min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	dev_kfree_skb(beacon);
	if (ret < 0)
		goto out;

out:
	return ret;
}
4095
4096 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4097 struct ieee80211_vif *vif,
4098 struct ieee80211_bss_conf *bss_conf,
4099 u32 changed)
4100 {
4101 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4102 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4103 int ret = 0;
4104
4105 if (changed & BSS_CHANGED_BEACON_INT) {
4106 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4107 bss_conf->beacon_int);
4108
4109 wlvif->beacon_int = bss_conf->beacon_int;
4110 }
4111
4112 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4113 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4114
4115 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4116 }
4117
4118 if (changed & BSS_CHANGED_BEACON) {
4119 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4120 if (ret < 0)
4121 goto out;
4122 }
4123
4124 out:
4125 if (ret != 0)
4126 wl1271_error("beacon info change failed: %d", ret);
4127 return ret;
4128 }
4129
4130 /* AP mode changes */
/*
 * Handle AP-mode BSS info changes: basic rates (which also forces a
 * re-init of AP rate policies and templates), beacon settings, AP
 * start/stop on beacon enable/disable, ERP parameters and HT operation.
 *
 * Errors abort the remaining steps but are not reported to the caller
 * (void return), matching mac80211's bss_info_changed contract.
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							  wlvif->basic_rate_set);

		/* rate policies and all templates depend on the rate set */
		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			/* start the AP role once, on first beacon enable */
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				/* replay any keys recorded pre-start */
				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4223
4224 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4225 struct ieee80211_bss_conf *bss_conf,
4226 u32 sta_rate_set)
4227 {
4228 u32 rates;
4229 int ret;
4230
4231 wl1271_debug(DEBUG_MAC80211,
4232 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4233 bss_conf->bssid, bss_conf->aid,
4234 bss_conf->beacon_int,
4235 bss_conf->basic_rates, sta_rate_set);
4236
4237 wlvif->beacon_int = bss_conf->beacon_int;
4238 rates = bss_conf->basic_rates;
4239 wlvif->basic_rate_set =
4240 wl1271_tx_enabled_rates_get(wl, rates,
4241 wlvif->band);
4242 wlvif->basic_rate =
4243 wl1271_tx_min_rate_get(wl,
4244 wlvif->basic_rate_set);
4245
4246 if (sta_rate_set)
4247 wlvif->rate_set =
4248 wl1271_tx_enabled_rates_get(wl,
4249 sta_rate_set,
4250 wlvif->band);
4251
4252 /* we only support sched_scan while not connected */
4253 if (wl->sched_vif == wlvif)
4254 wl->ops->sched_scan_stop(wl, wlvif);
4255
4256 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4257 if (ret < 0)
4258 return ret;
4259
4260 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4261 if (ret < 0)
4262 return ret;
4263
4264 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4265 if (ret < 0)
4266 return ret;
4267
4268 wlcore_set_ssid(wl, wlvif);
4269
4270 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4271
4272 return 0;
4273 }
4274
4275 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4276 {
4277 int ret;
4278
4279 /* revert back to minimum rates for the current band */
4280 wl1271_set_band_rate(wl, wlvif);
4281 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4282
4283 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4284 if (ret < 0)
4285 return ret;
4286
4287 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4288 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4289 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4290 if (ret < 0)
4291 return ret;
4292 }
4293
4294 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4295 return 0;
4296 }
4297 /* STA/IBSS mode changes */
/*
 * Handle STA/IBSS-mode BSS info changes. The ordering here matters:
 * peer capabilities are sampled under RCU first, then BSSID changes are
 * applied, the (re-)join is issued, and only after the join are assoc
 * state, power save, HT peer capabilities and ARP filtering applied.
 * Errors abort the remaining steps (void return, per mac80211).
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		/* IBSS beacons come through the beacon-change path */
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		/* a zero threshold disables the RSSI trigger */
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		/* snapshot the AP's rates/HT caps under RCU for later use */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
		/* enable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* re-apply authorization granted before assoc */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4545
/*
 * mac80211 bss_info_changed callback: handle the few items that must run
 * before/without wl->mutex, then dispatch to the AP- or STA-specific
 * handler with the FW awake.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* flush queued frames before beaconing is disabled (AP teardown) */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			/*
			 * NOTE(review): this skips wl1271_ps_elp_sleep() after
			 * a successful wakeup - verify the ELP refcounting is
			 * intentional here (recovery likely follows anyway).
			 */
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4602
4603 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4604 struct ieee80211_chanctx_conf *ctx)
4605 {
4606 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4607 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4608 cfg80211_get_chandef_type(&ctx->def));
4609 return 0;
4610 }
4611
4612 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4613 struct ieee80211_chanctx_conf *ctx)
4614 {
4615 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4616 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4617 cfg80211_get_chandef_type(&ctx->def));
4618 }
4619
4620 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4621 struct ieee80211_chanctx_conf *ctx,
4622 u32 changed)
4623 {
4624 wl1271_debug(DEBUG_MAC80211,
4625 "mac80211 change chanctx %d (type %d) changed 0x%x",
4626 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4627 cfg80211_get_chandef_type(&ctx->def), changed);
4628 }
4629
4630 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4631 struct ieee80211_vif *vif,
4632 struct ieee80211_chanctx_conf *ctx)
4633 {
4634 struct wl1271 *wl = hw->priv;
4635 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4636 int channel = ieee80211_frequency_to_channel(
4637 ctx->def.chan->center_freq);
4638
4639 wl1271_debug(DEBUG_MAC80211,
4640 "mac80211 assign chanctx (role %d) %d (type %d)",
4641 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4642
4643 mutex_lock(&wl->mutex);
4644
4645 wlvif->band = ctx->def.chan->band;
4646 wlvif->channel = channel;
4647 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4648
4649 /* update default rates according to the band */
4650 wl1271_set_band_rate(wl, wlvif);
4651
4652 mutex_unlock(&wl->mutex);
4653
4654 return 0;
4655 }
4656
4657 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4658 struct ieee80211_vif *vif,
4659 struct ieee80211_chanctx_conf *ctx)
4660 {
4661 struct wl1271 *wl = hw->priv;
4662 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4663
4664 wl1271_debug(DEBUG_MAC80211,
4665 "mac80211 unassign chanctx (role %d) %d (type %d)",
4666 wlvif->role_id,
4667 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4668 cfg80211_get_chandef_type(&ctx->def));
4669
4670 wl1271_tx_flush(wl);
4671 }
4672
/*
 * mac80211 conf_tx callback: program per-AC EDCA parameters and TID
 * configuration (incl. U-APSD vs. legacy PS scheme) into the FW.
 *
 * Returns 0 on success or a negative error from the ACX commands;
 * silently succeeds if the vif is not initialized yet.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	/* pick the FW PS scheme matching the queue's U-APSD setting */
	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us (hence the << 5, i.e. * 32)
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4722
4723 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4724 struct ieee80211_vif *vif)
4725 {
4726
4727 struct wl1271 *wl = hw->priv;
4728 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4729 u64 mactime = ULLONG_MAX;
4730 int ret;
4731
4732 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4733
4734 mutex_lock(&wl->mutex);
4735
4736 if (unlikely(wl->state != WLCORE_STATE_ON))
4737 goto out;
4738
4739 ret = wl1271_ps_elp_wakeup(wl);
4740 if (ret < 0)
4741 goto out;
4742
4743 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4744 if (ret < 0)
4745 goto out_sleep;
4746
4747 out_sleep:
4748 wl1271_ps_elp_sleep(wl);
4749
4750 out:
4751 mutex_unlock(&wl->mutex);
4752 return mactime;
4753 }
4754
4755 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4756 struct survey_info *survey)
4757 {
4758 struct ieee80211_conf *conf = &hw->conf;
4759
4760 if (idx != 0)
4761 return -ENOENT;
4762
4763 survey->channel = conf->chandef.chan;
4764 survey->filled = 0;
4765 return 0;
4766 }
4767
4768 static int wl1271_allocate_sta(struct wl1271 *wl,
4769 struct wl12xx_vif *wlvif,
4770 struct ieee80211_sta *sta)
4771 {
4772 struct wl1271_station *wl_sta;
4773 int ret;
4774
4775
4776 if (wl->active_sta_count >= wl->max_ap_stations) {
4777 wl1271_warning("could not allocate HLID - too much stations");
4778 return -EBUSY;
4779 }
4780
4781 wl_sta = (struct wl1271_station *)sta->drv_priv;
4782 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4783 if (ret < 0) {
4784 wl1271_warning("could not allocate HLID - too many links");
4785 return -EBUSY;
4786 }
4787
4788 /* use the previous security seq, if this is a recovery/resume */
4789 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4790
4791 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4792 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4793 wl->active_sta_count++;
4794 return 0;
4795 }
4796
/*
 * Release an AP-mode station's HLID and associated bookkeeping.
 * Safe to call for an hlid that is not currently allocated (no-op).
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of ieee80211_sta,
	 * in case of recovery/suspend
	 */
	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4822
4823 static int wl12xx_sta_add(struct wl1271 *wl,
4824 struct wl12xx_vif *wlvif,
4825 struct ieee80211_sta *sta)
4826 {
4827 struct wl1271_station *wl_sta;
4828 int ret = 0;
4829 u8 hlid;
4830
4831 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4832
4833 ret = wl1271_allocate_sta(wl, wlvif, sta);
4834 if (ret < 0)
4835 return ret;
4836
4837 wl_sta = (struct wl1271_station *)sta->drv_priv;
4838 hlid = wl_sta->hlid;
4839
4840 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4841 if (ret < 0)
4842 wl1271_free_sta(wl, wlvif, hlid);
4843
4844 return ret;
4845 }
4846
4847 static int wl12xx_sta_remove(struct wl1271 *wl,
4848 struct wl12xx_vif *wlvif,
4849 struct ieee80211_sta *sta)
4850 {
4851 struct wl1271_station *wl_sta;
4852 int ret = 0, id;
4853
4854 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4855
4856 wl_sta = (struct wl1271_station *)sta->drv_priv;
4857 id = wl_sta->hlid;
4858 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4859 return -EINVAL;
4860
4861 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
4862 if (ret < 0)
4863 return ret;
4864
4865 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4866 return ret;
4867 }
4868
/*
 * Start a ROC (remain-on-channel) for this vif's role, but only if no
 * role is currently ROCing - the map check bails out when any bit is
 * already set.
 */
static void wlcore_roc_if_possible(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif)
{
	/* some role is already ROCing - don't start another one */
	if (find_first_bit(wl->roc_map,
			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
		return;

	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
		return;

	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
4881
4882 /*
4883 * when wl_sta is NULL, we treat this call as if coming from a
4884 * pending auth reply.
4885 * wl->mutex must be taken and the FW must be awake when the call
4886 * takes place.
4887 */
4888 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4889 struct wl1271_station *wl_sta, bool in_conn)
4890 {
4891 if (in_conn) {
4892 if (WARN_ON(wl_sta && wl_sta->in_connection))
4893 return;
4894
4895 if (!wlvif->ap_pending_auth_reply &&
4896 !wlvif->inconn_count)
4897 wlcore_roc_if_possible(wl, wlvif);
4898
4899 if (wl_sta) {
4900 wl_sta->in_connection = true;
4901 wlvif->inconn_count++;
4902 } else {
4903 wlvif->ap_pending_auth_reply = true;
4904 }
4905 } else {
4906 if (wl_sta && !wl_sta->in_connection)
4907 return;
4908
4909 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4910 return;
4911
4912 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4913 return;
4914
4915 if (wl_sta) {
4916 wl_sta->in_connection = false;
4917 wlvif->inconn_count--;
4918 } else {
4919 wlvif->ap_pending_auth_reply = false;
4920 }
4921
4922 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4923 test_bit(wlvif->role_id, wl->roc_map))
4924 wl12xx_croc(wl, wlvif->role_id);
4925 }
4926 }
4927
/*
 * Handle a mac80211 station state transition, for both AP and STA
 * roles.  Each if-clause below matches one specific old->new edge.
 * Called with wl->mutex held and the FW awake (see wl12xx_op_sta_state).
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* station is "in connection" until authorized */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* authorized: connection setup done for this station */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* De-authorize on the way down (AUTHORIZED -> ASSOC) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* save seq number on disassoc (suspend) */
	if (is_sta &&
	    old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTH) {
		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
		wlvif->total_freed_pkts = 0;
	}

	/* restore seq number on assoc (resume) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC) {
		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* new connection attempt: ROC unless something is already ROCing */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
5028
/*
 * mac80211 sta_state callback: wake the FW and delegate to
 * wl12xx_update_sta_state().
 *
 * Errors on downward transitions (new_state < old_state) are
 * deliberately reported as success - mac80211 cannot handle failed
 * station removals.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	if (new_state < old_state)
		return 0;
	return ret;
}
5062
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in the
 * FW.  TX BA sessions are handled autonomously by the FW, so all TX
 * actions are rejected with -EINVAL.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the link (HLID) this BA session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	/* per-link bitmap of TIDs with an active RX BA session */
	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The BA initiator session is managed by the FW independently.
	 * Falling through here on purpose for all TX AMPDU commands.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5182
/*
 * mac80211 set_bitrate_mask callback: record the per-band legacy rate
 * masks on the vif; if the STA is not yet associated, also push the
 * new basic rate policy to the FW immediately.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* translate the mac80211 masks to HW rate bitmaps, per band */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5225
/*
 * mac80211 channel_switch callback: hand the CSA over to the FW and
 * arm a watchdog that declares failure if the switch has not completed
 * 5 seconds after the announced switch time.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		/* chip is down: report the switch as failed right away */
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
			ieee80211_chswitch_done(vif, false);
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */

	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5277
5278 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5279 u32 queues, bool drop)
5280 {
5281 struct wl1271 *wl = hw->priv;
5282
5283 wl1271_tx_flush(wl);
5284 }
5285
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work for the requested
 * duration.  Only one ROC can be active at a time.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the ROC owner and arm the expiry work */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5331
/*
 * Tear down the active ROC, if any.  Must be called with wl->mutex
 * held and the FW awake.  Returns 0 when no ROC is active or the stop
 * succeeded, -EBUSY if the ROC vif is not initialized.
 */
static int __wlcore_roc_completed(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	int ret;

	/* already completed */
	if (unlikely(!wl->roc_vif))
		return 0;

	wlvif = wl12xx_vif_to_data(wl->roc_vif);

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return -EBUSY;

	ret = wl12xx_stop_dev(wl, wlvif);
	if (ret < 0)
		return ret;

	/* clear the owner only after the device role is stopped */
	wl->roc_vif = NULL;

	return 0;
}
5354
/*
 * Locking/wakeup wrapper around __wlcore_roc_completed().
 * Returns -EBUSY if the core is not running.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5380
5381 static void wlcore_roc_complete_work(struct work_struct *work)
5382 {
5383 struct delayed_work *dwork;
5384 struct wl1271 *wl;
5385 int ret;
5386
5387 dwork = container_of(work, struct delayed_work, work);
5388 wl = container_of(dwork, struct wl1271, roc_complete_work);
5389
5390 ret = wlcore_roc_completed(wl);
5391 if (!ret)
5392 ieee80211_remain_on_channel_expired(wl->hw);
5393 }
5394
/*
 * mac80211 cancel_remain_on_channel callback: synchronously end the
 * active ROC before its timer fires.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5413
5414 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5415 struct ieee80211_vif *vif,
5416 struct ieee80211_sta *sta,
5417 u32 changed)
5418 {
5419 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5420
5421 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5422
5423 if (!(changed & IEEE80211_RC_BW_CHANGED))
5424 return;
5425
5426 /* this callback is atomic, so schedule a new work */
5427 wlvif->rc_update_bw = sta->bandwidth;
5428 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5429 }
5430
5431 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5432 struct ieee80211_vif *vif,
5433 struct ieee80211_sta *sta,
5434 s8 *rssi_dbm)
5435 {
5436 struct wl1271 *wl = hw->priv;
5437 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5438 int ret = 0;
5439
5440 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5441
5442 mutex_lock(&wl->mutex);
5443
5444 if (unlikely(wl->state != WLCORE_STATE_ON))
5445 goto out;
5446
5447 ret = wl1271_ps_elp_wakeup(wl);
5448 if (ret < 0)
5449 goto out_sleep;
5450
5451 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5452 if (ret < 0)
5453 goto out_sleep;
5454
5455 out_sleep:
5456 wl1271_ps_elp_sleep(wl);
5457
5458 out:
5459 mutex_unlock(&wl->mutex);
5460
5461 return ret;
5462 }
5463
5464 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5465 {
5466 struct wl1271 *wl = hw->priv;
5467 bool ret = false;
5468
5469 mutex_lock(&wl->mutex);
5470
5471 if (unlikely(wl->state != WLCORE_STATE_ON))
5472 goto out;
5473
5474 /* packets are considered pending if in the TX queue or the FW */
5475 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5476 out:
5477 mutex_unlock(&wl->mutex);
5478
5479 return ret;
5480 }
5481
/* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table (CCK + OFDM), in units of 100 kbps */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5524
/* can't be const, mac80211 writes to this */
/* 2.4 GHz channels 1-14; regulatory flags are filled in by mac80211 */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5542
/* can't be const, mac80211 writes to this */
/* 2.4 GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5550
/* 5 GHz data rates for WL1273 (OFDM only - no CCK on 5 GHz) */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5578
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5613
/* 5 GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5620
/* mac80211 callback table registered via ieee80211_register_hw() */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5664
5665
5666 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5667 {
5668 u8 idx;
5669
5670 BUG_ON(band >= 2);
5671
5672 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5673 wl1271_error("Illegal RX rate from HW: %d", rate);
5674 return 0;
5675 }
5676
5677 idx = wl->band_rate_to_idx[band][rate];
5678 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5679 wl1271_error("Unsupported RX rate from HW: %d", rate);
5680 return 0;
5681 }
5682
5683 return idx;
5684 }
5685
5686 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5687 {
5688 int i;
5689
5690 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5691 oui, nic);
5692
5693 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5694 wl1271_warning("NIC part of the MAC address wraps around!");
5695
5696 for (i = 0; i < wl->num_mac_addr; i++) {
5697 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5698 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5699 wl->addresses[i].addr[2] = (u8) oui;
5700 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5701 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5702 wl->addresses[i].addr[5] = (u8) nic;
5703 nic++;
5704 }
5705
5706 /* we may be one address short at the most */
5707 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5708
5709 /*
5710 * turn on the LAA bit in the first address and use it as
5711 * the last address.
5712 */
5713 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5714 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5715 memcpy(&wl->addresses[idx], &wl->addresses[0],
5716 sizeof(wl->addresses[0]));
5717 /* LAA bit */
5718 wl->addresses[idx].addr[0] |= BIT(1);
5719 }
5720
5721 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5722 wl->hw->wiphy->addresses = wl->addresses;
5723 }
5724
5725 static int wl12xx_get_hw_info(struct wl1271 *wl)
5726 {
5727 int ret;
5728
5729 ret = wl12xx_set_power_on(wl);
5730 if (ret < 0)
5731 return ret;
5732
5733 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5734 if (ret < 0)
5735 goto out;
5736
5737 wl->fuse_oui_addr = 0;
5738 wl->fuse_nic_addr = 0;
5739
5740 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5741 if (ret < 0)
5742 goto out;
5743
5744 if (wl->ops->get_mac)
5745 ret = wl->ops->get_mac(wl);
5746
5747 out:
5748 wl1271_power_off(wl);
5749 return ret;
5750 }
5751
5752 static int wl1271_register_hw(struct wl1271 *wl)
5753 {
5754 int ret;
5755 u32 oui_addr = 0, nic_addr = 0;
5756
5757 if (wl->mac80211_registered)
5758 return 0;
5759
5760 if (wl->nvs_len >= 12) {
5761 /* NOTE: The wl->nvs->nvs element must be first, in
5762 * order to simplify the casting, we assume it is at
5763 * the beginning of the wl->nvs structure.
5764 */
5765 u8 *nvs_ptr = (u8 *)wl->nvs;
5766
5767 oui_addr =
5768 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5769 nic_addr =
5770 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5771 }
5772
5773 /* if the MAC address is zeroed in the NVS derive from fuse */
5774 if (oui_addr == 0 && nic_addr == 0) {
5775 oui_addr = wl->fuse_oui_addr;
5776 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5777 nic_addr = wl->fuse_nic_addr + 1;
5778 }
5779
5780 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5781
5782 ret = ieee80211_register_hw(wl->hw);
5783 if (ret < 0) {
5784 wl1271_error("unable to register mac80211 hw: %d", ret);
5785 goto out;
5786 }
5787
5788 wl->mac80211_registered = true;
5789
5790 wl1271_debugfs_init(wl);
5791
5792 wl1271_notice("loaded");
5793
5794 out:
5795 return ret;
5796 }
5797
5798 static void wl1271_unregister_hw(struct wl1271 *wl)
5799 {
5800 if (wl->plt)
5801 wl1271_plt_stop(wl);
5802
5803 ieee80211_unregister_hw(wl->hw);
5804 wl->mac80211_registered = false;
5805
5806 }
5807
/*
 * Fill in the ieee80211_hw / wiphy capability fields for a wlcore device
 * prior to mac80211 registration: headroom, HW flags, cipher suites,
 * interface modes, scan limits, band/channel tables and queue layout.
 * Returns 0 (cannot currently fail).
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra room for the TKIP header in the skb */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_SUPPORTS_UAPSD |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_QUEUE_CONTROL |
		IEEE80211_HW_CHANCTX_STA_CSA;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 30000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	* clear channel flags from the previous usage
	* and restore max_power & max_antenna_gain values.
	*/
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	/* per-station / per-vif private data sizes for mac80211 */
	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5944
/*
 * Allocate a wlcore device together with its mac80211 hw and all buffers
 * that do not depend on the chip type.
 *
 * @priv_size: size of the chip-specific private area (wl->priv)
 * @aggr_buf_size: size of the TX/RX aggregation buffer (page allocation)
 * @mbox_size: size of the event mailbox buffer (DMA-capable allocation)
 *
 * Returns the new ieee80211_hw on success, or an ERR_PTR() on failure.
 * On success the caller eventually releases everything with
 * wlcore_free_hw(); on failure all partial allocations are unwound here.
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	/* default state before any chip-specific setup has run */
	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	/*
	 * NOTE(review): wl->num_tx_desc appears to still be 0 here (wl was
	 * just zeroed and ops->setup runs later), so this loop may never
	 * execute -- confirm against the chip drivers' setup order.
	 */
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

/* unwind in reverse order of allocation */
err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6097
/*
 * Release everything allocated by wlcore_alloc_hw() and by later init
 * stages, in reverse order of allocation. Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware and NVS images (may already be NULL) */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
6133
#ifdef CONFIG_PM
/*
 * WoWLAN capabilities advertised to cfg80211; only used when the platform
 * keeps the chip powered in suspend (see pdata->pwr_in_suspend handling
 * in wlcore_nvs_cb()).
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6142
/*
 * Hard IRQ handler installed for edge-triggered interrupt lines: it does
 * no work itself and just wakes the threaded handler (wlcore_irq).
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6147
6148 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6149 {
6150 struct wl1271 *wl = context;
6151 struct platform_device *pdev = wl->pdev;
6152 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6153 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6154 unsigned long irqflags;
6155 int ret;
6156 irq_handler_t hardirq_fn = NULL;
6157
6158 if (fw) {
6159 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6160 if (!wl->nvs) {
6161 wl1271_error("Could not allocate nvs data");
6162 goto out;
6163 }
6164 wl->nvs_len = fw->size;
6165 } else {
6166 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6167 WL12XX_NVS_NAME);
6168 wl->nvs = NULL;
6169 wl->nvs_len = 0;
6170 }
6171
6172 ret = wl->ops->setup(wl);
6173 if (ret < 0)
6174 goto out_free_nvs;
6175
6176 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6177
6178 /* adjust some runtime configuration parameters */
6179 wlcore_adjust_conf(wl);
6180
6181 wl->irq = platform_get_irq(pdev, 0);
6182 wl->platform_quirks = pdata->platform_quirks;
6183 wl->if_ops = pdev_data->if_ops;
6184
6185 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6186 irqflags = IRQF_TRIGGER_RISING;
6187 hardirq_fn = wlcore_hardirq;
6188 } else {
6189 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6190 }
6191
6192 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6193 irqflags, pdev->name, wl);
6194 if (ret < 0) {
6195 wl1271_error("request_irq() failed: %d", ret);
6196 goto out_free_nvs;
6197 }
6198
6199 #ifdef CONFIG_PM
6200 ret = enable_irq_wake(wl->irq);
6201 if (!ret) {
6202 wl->irq_wake_enabled = true;
6203 device_init_wakeup(wl->dev, 1);
6204 if (pdata->pwr_in_suspend)
6205 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6206 }
6207 #endif
6208 disable_irq(wl->irq);
6209
6210 ret = wl12xx_get_hw_info(wl);
6211 if (ret < 0) {
6212 wl1271_error("couldn't get hw info");
6213 goto out_irq;
6214 }
6215
6216 ret = wl->ops->identify_chip(wl);
6217 if (ret < 0)
6218 goto out_irq;
6219
6220 ret = wl1271_init_ieee80211(wl);
6221 if (ret)
6222 goto out_irq;
6223
6224 ret = wl1271_register_hw(wl);
6225 if (ret)
6226 goto out_irq;
6227
6228 ret = wlcore_sysfs_init(wl);
6229 if (ret)
6230 goto out_unreg;
6231
6232 wl->initialized = true;
6233 goto out;
6234
6235 out_unreg:
6236 wl1271_unregister_hw(wl);
6237
6238 out_irq:
6239 free_irq(wl->irq, wl);
6240
6241 out_free_nvs:
6242 kfree(wl->nvs);
6243
6244 out:
6245 release_firmware(fw);
6246 complete_all(&wl->nvs_loading_complete);
6247 }
6248
6249 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6250 {
6251 int ret;
6252
6253 if (!wl->ops || !wl->ptable)
6254 return -EINVAL;
6255
6256 wl->dev = &pdev->dev;
6257 wl->pdev = pdev;
6258 platform_set_drvdata(pdev, wl);
6259
6260 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6261 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6262 wl, wlcore_nvs_cb);
6263 if (ret < 0) {
6264 wl1271_error("request_firmware_nowait failed: %d", ret);
6265 complete_all(&wl->nvs_loading_complete);
6266 }
6267
6268 return ret;
6269 }
6270 EXPORT_SYMBOL_GPL(wlcore_probe);
6271
6272 int wlcore_remove(struct platform_device *pdev)
6273 {
6274 struct wl1271 *wl = platform_get_drvdata(pdev);
6275
6276 wait_for_completion(&wl->nvs_loading_complete);
6277 if (!wl->initialized)
6278 return 0;
6279
6280 if (wl->irq_wake_enabled) {
6281 device_init_wakeup(wl->dev, 0);
6282 disable_irq_wake(wl->irq);
6283 }
6284 wl1271_unregister_hw(wl);
6285 free_irq(wl->irq, wl);
6286 wlcore_free_hw(wl);
6287
6288 return 0;
6289 }
6290 EXPORT_SYMBOL_GPL(wlcore_remove);
6291
/* runtime-tunable debug verbosity, exported for the chip drivers */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* module parameters backed by the static variables at the top of the file */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);