Merge tag 'for-linus-20140127' of git://git.infradead.org/linux-mtd
[deliverable/linux.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
44 #define WL1271_BOOT_RETRIES 3
45
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
50
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
83 int i;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
86
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
91 continue;
92
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IR;
95
96 }
97
98 wlcore_regdomain_config(wl);
99 }
100
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
102 bool enable)
103 {
104 int ret = 0;
105
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
108 if (ret < 0)
109 goto out;
110
111 if (enable)
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
113 else
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
115 out:
116 return ret;
117 }
118
119 /*
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
122 */
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
124 {
125 int ret = 0;
126 int period = wl->conf.rx_streaming.interval;
127
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
130 goto out;
131
132 /* reconfigure/disable according to new streaming_period */
133 if (period &&
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
138 else {
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
142 }
143 out:
144 return ret;
145 }
146
/*
 * Deferred work that turns RX streaming on.
 *
 * Runs under wl->mutex.  The enable is skipped unless the vif is an
 * associated STA, streaming is not already started, the configuration
 * allows it (always-on or soft-gemini active), and a non-zero interval
 * is configured.  The chip is woken via ELP before the ACX command and
 * put back to sleep afterwards.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* already started, not associated, or config forbids streaming */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* a zero interval means rx streaming is disabled */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
182
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
184 {
185 int ret;
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
189
190 mutex_lock(&wl->mutex);
191
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
193 goto out;
194
195 ret = wl1271_ps_elp_wakeup(wl);
196 if (ret < 0)
197 goto out;
198
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
200 if (ret)
201 goto out_sleep;
202
203 out_sleep:
204 wl1271_ps_elp_sleep(wl);
205 out:
206 mutex_unlock(&wl->mutex);
207 }
208
209 static void wl1271_rx_streaming_timer(unsigned long data)
210 {
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
214 }
215
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 {
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
221 return;
222
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
226 }
227
/*
 * Tx watchdog: fires when no Tx blocks were released by the FW for a
 * whole tx_watchdog_timeout period while blocks are still allocated.
 *
 * Several benign conditions can legitimately starve Tx (ROC, scan,
 * AP buffering frames for sleeping stations); in those cases the
 * watchdog is simply re-armed.  Only when none of them applies is the
 * firmware considered stuck and a recovery queued.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/* no excuse left - the FW is assumed stuck; start recovery */
	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
289
290 static void wlcore_adjust_conf(struct wl1271 *wl)
291 {
292 /* Adjust settings according to optional module parameters */
293
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks != -1) {
296 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
297 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
298 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
299 } else {
300 wl1271_error(
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
303 }
304 }
305
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 }
320 }
321
322 if (bug_on_recovery != -1)
323 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324
325 if (no_recovery != -1)
326 wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
328
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 struct wl12xx_vif *wlvif,
331 u8 hlid, u8 tx_pkts)
332 {
333 bool fw_ps;
334
335 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
336
337 /*
338 * Wake up from high level PS if the STA is asleep with too little
339 * packets in FW or if the STA is awake.
340 */
341 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 wl12xx_ps_link_end(wl, wlvif, hlid);
343
344 /*
345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 3 active links, since we must
349 * account for the global and broadcast AP links. The "fw_ps" check
350 * assures us the third link is a STA connected to the AP. Otherwise
351 * the FW would not set the PSM bit.
352 */
353 else if (wl->active_link_count > 3 && fw_ps &&
354 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status)
361 {
362 u32 cur_fw_ps_map;
363 u8 hlid;
364
365 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
371
372 wl->ap_fw_ps_map = cur_fw_ps_map;
373 }
374
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
378 }
379
/*
 * Read the firmware status area in one raw transfer and fold it into
 * the driver's bookkeeping: per-queue and per-link freed-packet
 * counters, total freed Tx blocks, available Tx blocks, AP link PS
 * state and the host/chip time offset.
 *
 * All counter deltas use modular arithmetic because the FW counters
 * are narrow and wrap (8-bit per-queue/per-link, 32-bit total blocks).
 * Caller is expected to hold wl->mutex.  Returns 0 or a negative
 * bus-read error.
 */
static int wlcore_fw_status(struct wl1271 *wl,
			    struct wl_fw_status_1 *status_1,
			    struct wl_fw_status_2 *status_2)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	size_t status_len;
	int ret;
	struct wl1271_link *lnk;

	/* status_1 is variable-length (depends on num_rx_desc); status_2
	 * and an optional chip-private trailer follow it directly */
	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
		sizeof(*status_2) + wl->fw_status_priv_len;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
				   status_len, false);
	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status_1->intr,
		     status_1->fw_rx_counter,
		     status_1->drv_rx_counter,
		     status_1->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status_2->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status_2->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status_2->total_released_blks)))
		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		/* the 32-bit FW counter wrapped; account for the wrap */
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status_2->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status_2);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status_2->fw_localtime);

	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);

	return 0;
}
492
493 static void wl1271_flush_deferred_work(struct wl1271 *wl)
494 {
495 struct sk_buff *skb;
496
497 /* Pass all received frames to the network stack */
498 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 ieee80211_rx_ni(wl->hw, skb);
500
501 /* Return sent skbs to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 ieee80211_tx_status_ni(wl->hw, skb);
504 }
505
506 static void wl1271_netstack_work(struct work_struct *work)
507 {
508 struct wl1271 *wl =
509 container_of(work, struct wl1271, netstack_work);
510
511 do {
512 wl1271_flush_deferred_work(wl);
513 } while (skb_queue_len(&wl->deferred_rx_queue));
514 }
515
516 #define WL1271_IRQ_MAX_LOOPS 256
517
/*
 * Core interrupt handling loop, called with wl->mutex held.
 *
 * Repeatedly reads the FW status area and dispatches the pending
 * interrupt causes (data RX/TX, event mailboxes A/B, watchdogs) until
 * no interrupt bits remain or the loop budget is exhausted.  Returns
 * 0 on success; a negative value indicates the caller should trigger
 * recovery (watchdog interrupts explicitly return -EIO).
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		/* reading the status also acks the interrupt in the chip */
		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = le32_to_cpu(wl->fw_status_1->intr);
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status_1);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
646
/*
 * Threaded IRQ handler.
 *
 * First completes any pending ELP wakeup waiter under wl_lock; if the
 * device is suspended it only records the pending work, disables the
 * IRQ and signals a PM wakeup event.  Otherwise it runs the main IRQ
 * loop under wl->mutex, queuing recovery on failure, and finally
 * queues the Tx work if frames are still pending.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
694
/*
 * Accumulator used by wl12xx_vif_count_iter() / wl12xx_get_vif_count():
 * counts active interfaces and notes whether a specific vif is running.
 */
struct vif_counter_data {
	u8 counter;		/* number of active interfaces seen */

	struct ieee80211_vif *cur_vif;		/* vif to look for */
	bool cur_vif_running;	/* true once cur_vif was encountered */
};
701
702 static void wl12xx_vif_count_iter(void *data, u8 *mac,
703 struct ieee80211_vif *vif)
704 {
705 struct vif_counter_data *counter = data;
706
707 counter->counter++;
708 if (counter->cur_vif == vif)
709 counter->cur_vif_running = true;
710 }
711
712 /* caller must not hold wl->mutex, as it might deadlock */
713 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
714 struct ieee80211_vif *cur_vif,
715 struct vif_counter_data *data)
716 {
717 memset(data, 0, sizeof(*data));
718 data->cur_vif = cur_vif;
719
720 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
721 wl12xx_vif_count_iter, data);
722 }
723
/*
 * Select and load the firmware image needed for the requested mode.
 *
 * Chooses between the PLT image, the multi-role image (when more than
 * one vif was active at last count and a MR image exists) and the
 * single-role image.  If the right type is already loaded, returns
 * immediately.  On success the image is copied into a vmalloc'ed
 * buffer owned by wl (wl->fw / wl->fw_len / wl->fw_type); the
 * request_firmware() buffer is always released.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the requested image is already loaded - nothing to do */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the boot code transfers the image in 32-bit words */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop the previous image; mark type NONE until the copy succeeds */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
786
787 void wl12xx_queue_recovery_work(struct wl1271 *wl)
788 {
789 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
790
791 /* Avoid a recursive recovery */
792 if (wl->state == WLCORE_STATE_ON) {
793 wl->state = WLCORE_STATE_RESTARTING;
794 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
795 wl1271_ps_elp_wakeup(wl);
796 wlcore_disable_interrupts_nosync(wl);
797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
798 }
799 }
800
801 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
802 {
803 size_t len;
804
805 /* Make sure we have enough room */
806 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
807
808 /* Fill the FW log file, consumed by the sysfs fwlog entry */
809 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
810 wl->fwlog_size += len;
811
812 return len;
813 }
814
/*
 * Dump the firmware's panic log into the host-side fwlog buffer.
 *
 * The FW log lives in a linked list of chip memory blocks: the first
 * 4 bytes of each block hold the hardware address of the next one.
 * Each block is read by momentarily switching the bus partition to
 * cover it, then the payload (past the link word, plus an RX
 * descriptor header in continuous mode) is appended via
 * wl12xx_copy_fwlog().  The original partition is restored on exit
 * and any sysfs reader waiting on fwlog_waitq is woken.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
	if (ret < 0)
		goto out;

	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
	if (!addr)
		goto out;

	/* pick the payload offset and the walk's stop address per mode */
	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size  = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		/* stop when the host-side log page is full */
		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	wlcore_set_partition(wl, &old_part);
}
903
/*
 * Log diagnostic state at the start of a hardware recovery: the FW
 * version, the FW program counter and the raw interrupt status.
 *
 * The boot partition must be selected before the register reads, and
 * the work partition is restored afterwards; on any read failure the
 * function bails out early (leaving the boot partition selected is
 * acceptable since the chip is about to be rebooted anyway).
 */
static void wlcore_print_recovery(struct wl1271 *wl)
{
	u32 pc = 0;
	u32 hint_sts = 0;
	int ret;

	wl1271_info("Hardware recovery in progress. FW ver: %s",
		    wl->chip.fw_ver_str);

	/* change partitions momentarily so we can read the FW pc */
	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
	if (ret < 0)
		return;

	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
		    pc, hint_sts, ++wl->recovery_count);

	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
931
932
/*
 * Firmware recovery worker.
 *
 * For unintended recoveries, first salvages diagnostics (panic log,
 * FW pc).  Unless recovery is disabled by module parameter, it then
 * tears down every virtual interface, stops the hardware and asks
 * mac80211 to restart it.  Tx queues are stopped across the restart
 * to prevent spurious transmissions.  Runs under wl->mutex.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	/* nothing to recover when off or in PLT mode */
	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	/* optional debug aid: crash the host on unintended recoveries */
	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
985
/*
 * Wake the chip out of ELP (extremely low power) by writing the
 * wake-up bit to the ELP control register.  Returns the raw bus
 * write result.
 */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
990
991 static int wl1271_setup(struct wl1271 *wl)
992 {
993 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
994 sizeof(*wl->fw_status_2) +
995 wl->fw_status_priv_len, GFP_KERNEL);
996 if (!wl->fw_status_1)
997 return -ENOMEM;
998
999 wl->fw_status_2 = (struct wl_fw_status_2 *)
1000 (((u8 *) wl->fw_status_1) +
1001 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
1002
1003 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1004 if (!wl->tx_res_if) {
1005 kfree(wl->fw_status_1);
1006 return -ENOMEM;
1007 }
1008
1009 return 0;
1010 }
1011
/*
 * Power the chip on and bring it to a bus-accessible state: power
 * sequencing with the required pre/post delays, I/O reset and init,
 * selecting the boot partition and waking the ELP module.
 *
 * On any failure after power-up the chip is powered off again before
 * returning the error.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle time required before asserting power */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	/* allow the chip to complete its internal power-up */
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1040
1041 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1042 {
1043 int ret = 0;
1044
1045 ret = wl12xx_set_power_on(wl);
1046 if (ret < 0)
1047 goto out;
1048
1049 /*
1050 * For wl127x based devices we could use the default block
1051 * size (512 bytes), but due to a bug in the sdio driver, we
1052 * need to set it explicitly after the chip is powered on. To
1053 * simplify the code and since the performance impact is
1054 * negligible, we use the same block size for all different
1055 * chip types.
1056 *
1057 * Check if the bus supports blocksize alignment and, if it
1058 * doesn't, make sure we don't have the quirk.
1059 */
1060 if (!wl1271_set_block_size(wl))
1061 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1062
1063 /* TODO: make sure the lower driver has set things up correctly */
1064
1065 ret = wl1271_setup(wl);
1066 if (ret < 0)
1067 goto out;
1068
1069 ret = wl12xx_fetch_firmware(wl, plt);
1070 if (ret < 0)
1071 goto out;
1072
1073 out:
1074 return ret;
1075 }
1076
1077 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1078 {
1079 int retries = WL1271_BOOT_RETRIES;
1080 struct wiphy *wiphy = wl->hw->wiphy;
1081
1082 static const char* const PLT_MODE[] = {
1083 "PLT_OFF",
1084 "PLT_ON",
1085 "PLT_FEM_DETECT",
1086 "PLT_CHIP_AWAKE"
1087 };
1088
1089 int ret;
1090
1091 mutex_lock(&wl->mutex);
1092
1093 wl1271_notice("power up");
1094
1095 if (wl->state != WLCORE_STATE_OFF) {
1096 wl1271_error("cannot go into PLT state because not "
1097 "in off state: %d", wl->state);
1098 ret = -EBUSY;
1099 goto out;
1100 }
1101
1102 /* Indicate to lower levels that we are now in PLT mode */
1103 wl->plt = true;
1104 wl->plt_mode = plt_mode;
1105
1106 while (retries) {
1107 retries--;
1108 ret = wl12xx_chip_wakeup(wl, true);
1109 if (ret < 0)
1110 goto power_off;
1111
1112 if (plt_mode != PLT_CHIP_AWAKE) {
1113 ret = wl->ops->plt_init(wl);
1114 if (ret < 0)
1115 goto power_off;
1116 }
1117
1118 wl->state = WLCORE_STATE_ON;
1119 wl1271_notice("firmware booted in PLT mode %s (%s)",
1120 PLT_MODE[plt_mode],
1121 wl->chip.fw_ver_str);
1122
1123 /* update hw/fw version info in wiphy struct */
1124 wiphy->hw_version = wl->chip.id;
1125 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1126 sizeof(wiphy->fw_version));
1127
1128 goto out;
1129
1130 power_off:
1131 wl1271_power_off(wl);
1132 }
1133
1134 wl->plt = false;
1135 wl->plt_mode = PLT_OFF;
1136
1137 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1138 WL1271_BOOT_RETRIES);
1139 out:
1140 mutex_unlock(&wl->mutex);
1141
1142 return ret;
1143 }
1144
/*
 * Leave PLT mode and power the chip down.
 *
 * Returns 0 on success, or -EBUSY if the driver is not currently in PLT
 * mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	/* drop the mutex before cancelling works: the handlers take it */
	mutex_unlock(&wl->mutex);

	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* all works quiesced: power off and reset the driver state */
	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1195
/*
 * mac80211 TX entry point.
 *
 * Maps the skb to its link (hlid) and AC queue, enqueues it on the
 * corresponding driver queue under wl_lock, and schedules the TX work.
 * Frames are dropped (freed back to mac80211) when no vif is attached,
 * the link is invalid, or the queue is hard-stopped.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* both global and per-vif counters track queue depth */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	/* don't schedule TX work while the FW is busy or TX is pending */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1268
1269 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1270 {
1271 unsigned long flags;
1272 int q;
1273
1274 /* no need to queue a new dummy packet if one is already pending */
1275 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1276 return 0;
1277
1278 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1279
1280 spin_lock_irqsave(&wl->wl_lock, flags);
1281 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1282 wl->tx_queue_count[q]++;
1283 spin_unlock_irqrestore(&wl->wl_lock, flags);
1284
1285 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1286 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1287 return wlcore_tx_work_locked(wl);
1288
1289 /*
1290 * If the FW TX is busy, TX work will be scheduled by the threaded
1291 * interrupt handler function
1292 */
1293 return 0;
1294 }
1295
1296 /*
1297 * The size of the dummy packet should be at least 1400 bytes. However, in
1298 * order to minimize the number of bus transactions, aligning it to 512 bytes
1299 * boundaries could be beneficial, performance wise
1300 */
1301 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1302
1303 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1304 {
1305 struct sk_buff *skb;
1306 struct ieee80211_hdr_3addr *hdr;
1307 unsigned int dummy_packet_size;
1308
1309 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1310 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1311
1312 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1313 if (!skb) {
1314 wl1271_warning("Failed to allocate a dummy packet skb");
1315 return NULL;
1316 }
1317
1318 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1319
1320 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1321 memset(hdr, 0, sizeof(*hdr));
1322 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1323 IEEE80211_STYPE_NULLFUNC |
1324 IEEE80211_FCTL_TODS);
1325
1326 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1327
1328 /* Dummy packets require the TID to be management */
1329 skb->priority = WL1271_TID_MGMT;
1330
1331 /* Initialize all fields that might be used */
1332 skb_set_queue_mapping(skb, 0);
1333 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1334
1335 return skb;
1336 }
1337
1338
1339 #ifdef CONFIG_PM
/*
 * Validate a cfg80211 WoWLAN packet pattern against the FW RX-filter
 * limits (maximum number of fields and maximum flattened fields size).
 *
 * Returns 0 if the pattern fits, -EINVAL or -E2BIG otherwise.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new contiguous segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* split at the Ethernet/IP boundary */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				/* segment ended: account for it */
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* account for a segment still open at the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1401
1402 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1403 {
1404 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1405 }
1406
1407 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1408 {
1409 int i;
1410
1411 if (filter == NULL)
1412 return;
1413
1414 for (i = 0; i < filter->num_fields; i++)
1415 kfree(filter->fields[i].pattern);
1416
1417 kfree(filter);
1418 }
1419
1420 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1421 u16 offset, u8 flags,
1422 u8 *pattern, u8 len)
1423 {
1424 struct wl12xx_rx_filter_field *field;
1425
1426 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1427 wl1271_warning("Max fields per RX filter. can't alloc another");
1428 return -EINVAL;
1429 }
1430
1431 field = &filter->fields[filter->num_fields];
1432
1433 field->pattern = kzalloc(len, GFP_KERNEL);
1434 if (!field->pattern) {
1435 wl1271_warning("Failed to allocate RX filter pattern");
1436 return -ENOMEM;
1437 }
1438
1439 filter->num_fields++;
1440
1441 field->offset = cpu_to_le16(offset);
1442 field->flags = flags;
1443 field->len = len;
1444 memcpy(field->pattern, pattern, len);
1445
1446 return 0;
1447 }
1448
1449 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1450 {
1451 int i, fields_size = 0;
1452
1453 for (i = 0; i < filter->num_fields; i++)
1454 fields_size += filter->fields[i].len +
1455 sizeof(struct wl12xx_rx_filter_field) -
1456 sizeof(u8 *);
1457
1458 return fields_size;
1459 }
1460
1461 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1462 u8 *buf)
1463 {
1464 int i;
1465 struct wl12xx_rx_filter_field *field;
1466
1467 for (i = 0; i < filter->num_fields; i++) {
1468 field = (struct wl12xx_rx_filter_field *)buf;
1469
1470 field->offset = filter->fields[i].offset;
1471 field->flags = filter->fields[i].flags;
1472 field->len = filter->fields[i].len;
1473
1474 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1475 buf += sizeof(struct wl12xx_rx_filter_field) -
1476 sizeof(u8 *) + field->len;
1477 }
1478 }
1479
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 *
 * Walks the pattern mask and turns each contiguous run of masked bytes
 * into one filter field, splitting runs that cross the Ethernet/IP
 * header boundary. Returns 0 on success or a negative error code, in
 * which case *f is set to NULL.
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip unmasked bytes; they are "don't care" */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of this contiguous masked run [i, j) */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* a run may not span the Ethernet/IP boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* offsets are relative to the header the run starts in */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1547
/*
 * Program the FW RX filters from a cfg80211 WoWLAN configuration.
 *
 * With no usable patterns (wow == NULL, "any" trigger, or zero patterns)
 * all filters are cleared and the default action is FILTER_SIGNAL.
 * Otherwise each pattern is converted to an RX filter, enabled in the FW,
 * and the default action becomes FILTER_DROP so only matches wake us.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		/* the FW keeps its own copy; ours is freed right after */
		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop everything that doesn't match an enabled filter */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1612
/*
 * Prepare a STA interface for system suspend: program the WoWLAN filters
 * and, if configured differently, switch to the suspend wake-up
 * conditions. No-op for unassociated interfaces.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	/* wake the chip from ELP for the duration of the configuration */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out_sleep;

	/* suspend conditions identical to runtime ones: nothing to change */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out_sleep;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	return ret;

}
1649
/*
 * Prepare an AP interface for system suspend: enable beacon filtering so
 * the host is not woken for every beacon. No-op if the AP isn't started.
 */
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
		goto out;

	/* wake the chip from ELP for the duration of the configuration */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);

	wl1271_ps_elp_sleep(wl);
out:
	return ret;

}
1669
1670 static int wl1271_configure_suspend(struct wl1271 *wl,
1671 struct wl12xx_vif *wlvif,
1672 struct cfg80211_wowlan *wow)
1673 {
1674 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1675 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1676 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1677 return wl1271_configure_suspend_ap(wl, wlvif);
1678 return 0;
1679 }
1680
/*
 * Undo the suspend-time configuration on resume: clear WoWLAN filters and
 * restore runtime wake-up conditions for STA, disable beacon filtering
 * for AP. Errors are logged but not propagated.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	/* nothing was configured at suspend time for unassociated STAs */
	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		/* NULL wow config clears all RX filters */
		wl1271_configure_wowlan(wl, NULL);

		/* wake conditions were never changed: nothing to restore */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			goto out_sleep;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
}
1721
/*
 * mac80211 suspend handler: configure every interface for WoWLAN, then
 * flush all in-flight work so the chip is quiescent before the system
 * sleeps. Returns -EBUSY if a recovery is pending (suspend is retried
 * after it completes).
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	return 0;
}
1772
/*
 * mac80211 resume handler: re-enable irq work, run any IRQ work that was
 * postponed while suspended, requeue a forgotten recovery if one is
 * pending, and restore each interface's runtime configuration.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

out:
	wl->wow_enabled = false;
	mutex_unlock(&wl->mutex);

	return 0;
}
1831 #endif
1832
/*
 * mac80211 start handler: intentionally a no-op; actual hardware bring-up
 * is deferred until an interface is added (see comment below).
 */
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */

	return 0;
}
1850
/*
 * Stop the device and reset all driver state to its power-off defaults.
 *
 * Must be called with wl->mutex held; the mutex is temporarily released
 * while interrupts are synchronized and works are cancelled, and is held
 * again on return.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* balance a disable done by a pending recovery, if any */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all runtime state to the defaults used at next power-on */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* fw_status_2 points into the fw_status_1 allocation; free once */
	kfree(wl->fw_status_1);
	wl->fw_status_1 = NULL;
	wl->fw_status_2 = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
1957
/* mac80211 stop handler: take the mutex and run the locked stop path. */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
1970
/*
 * Delayed work that fires when a channel switch did not complete in time:
 * report the failure to mac80211 and tell the FW to stop the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* tell mac80211 the switch failed (success = false) */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2007
/*
 * Delayed work that reports a lost connection to mac80211, unless the
 * device went down or the STA re-associated in the meantime.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2035
/*
 * Delayed work that cancels the pending-auth ROC if no new auth reply was
 * seen within (roughly) WLCORE_PEND_AUTH_ROC_TIMEOUT milliseconds.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2076
2077 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2078 {
2079 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2080 WL12XX_MAX_RATE_POLICIES);
2081 if (policy >= WL12XX_MAX_RATE_POLICIES)
2082 return -EBUSY;
2083
2084 __set_bit(policy, wl->rate_policies_map);
2085 *idx = policy;
2086 return 0;
2087 }
2088
2089 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2090 {
2091 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2092 return;
2093
2094 __clear_bit(*idx, wl->rate_policies_map);
2095 *idx = WL12XX_MAX_RATE_POLICIES;
2096 }
2097
2098 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2099 {
2100 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2101 WLCORE_MAX_KLV_TEMPLATES);
2102 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2103 return -EBUSY;
2104
2105 __set_bit(policy, wl->klv_templates_map);
2106 *idx = policy;
2107 return 0;
2108 }
2109
2110 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2111 {
2112 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2113 return;
2114
2115 __clear_bit(*idx, wl->klv_templates_map);
2116 *idx = WLCORE_MAX_KLV_TEMPLATES;
2117 }
2118
2119 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2120 {
2121 switch (wlvif->bss_type) {
2122 case BSS_TYPE_AP_BSS:
2123 if (wlvif->p2p)
2124 return WL1271_ROLE_P2P_GO;
2125 else
2126 return WL1271_ROLE_AP;
2127
2128 case BSS_TYPE_STA_BSS:
2129 if (wlvif->p2p)
2130 return WL1271_ROLE_P2P_CL;
2131 else
2132 return WL1271_ROLE_STA;
2133
2134 case BSS_TYPE_IBSS:
2135 return WL1271_ROLE_IBSS;
2136
2137 default:
2138 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2139 }
2140 return WL12XX_INVALID_ROLE_TYPE;
2141 }
2142
/*
 * Initialize the per-vif driver data for a newly added interface: derive
 * the bss type from the mac80211 interface type, allocate rate policies
 * (and, for STA/IBSS, a keep-alive template), copy global settings into
 * the vif, and set up the vif's works and timer.
 *
 * Returns 0, or -EOPNOTSUPP for unsupported interface types.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no FW role/link exists yet for this vif */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	/*
	 * NOTE(review): the rate-policy/klv allocation calls below can
	 * return -EBUSY and are not checked — presumably enough slots are
	 * guaranteed by the interface limits; verify against callers.
	 */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2236
2237 static int wl12xx_init_fw(struct wl1271 *wl)
2238 {
2239 int retries = WL1271_BOOT_RETRIES;
2240 bool booted = false;
2241 struct wiphy *wiphy = wl->hw->wiphy;
2242 int ret;
2243
2244 while (retries) {
2245 retries--;
2246 ret = wl12xx_chip_wakeup(wl, false);
2247 if (ret < 0)
2248 goto power_off;
2249
2250 ret = wl->ops->boot(wl);
2251 if (ret < 0)
2252 goto power_off;
2253
2254 ret = wl1271_hw_init(wl);
2255 if (ret < 0)
2256 goto irq_disable;
2257
2258 booted = true;
2259 break;
2260
2261 irq_disable:
2262 mutex_unlock(&wl->mutex);
2263 /* Unlocking the mutex in the middle of handling is
2264 inherently unsafe. In this case we deem it safe to do,
2265 because we need to let any possibly pending IRQ out of
2266 the system (and while we are WLCORE_STATE_OFF the IRQ
2267 work function will not do anything.) Also, any other
2268 possible concurrent operations will fail due to the
2269 current state, hence the wl1271 struct should be safe. */
2270 wlcore_disable_interrupts(wl);
2271 wl1271_flush_deferred_work(wl);
2272 cancel_work_sync(&wl->netstack_work);
2273 mutex_lock(&wl->mutex);
2274 power_off:
2275 wl1271_power_off(wl);
2276 }
2277
2278 if (!booted) {
2279 wl1271_error("firmware boot failed despite %d retries",
2280 WL1271_BOOT_RETRIES);
2281 goto out;
2282 }
2283
2284 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2285
2286 /* update hw/fw version info in wiphy struct */
2287 wiphy->hw_version = wl->chip.id;
2288 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2289 sizeof(wiphy->fw_version));
2290
2291 /*
2292 * Now we know if 11a is supported (info from the NVS), so disable
2293 * 11a channels if not supported
2294 */
2295 if (!wl->enable_11a)
2296 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2297
2298 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2299 wl->enable_11a ? "" : "not ");
2300
2301 wl->state = WLCORE_STATE_ON;
2302 out:
2303 return ret;
2304 }
2305
2306 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2307 {
2308 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2309 }
2310
2311 /*
2312 * Check whether a fw switch (i.e. moving from one loaded
2313 * fw to another) is needed. This function is also responsible
2314 * for updating wl->last_vif_count, so it must be called before
2315 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2316 * will be used).
2317 */
2318 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2319 struct vif_counter_data vif_counter_data,
2320 bool add)
2321 {
2322 enum wl12xx_fw_type current_fw = wl->fw_type;
2323 u8 vif_count = vif_counter_data.counter;
2324
2325 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2326 return false;
2327
2328 /* increase the vif count if this is a new vif */
2329 if (add && !vif_counter_data.cur_vif_running)
2330 vif_count++;
2331
2332 wl->last_vif_count = vif_count;
2333
2334 /* no need for fw change if the device is OFF */
2335 if (wl->state == WLCORE_STATE_OFF)
2336 return false;
2337
2338 /* no need for fw change if a single fw is used */
2339 if (!wl->mr_fw_name)
2340 return false;
2341
2342 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2343 return true;
2344 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2345 return true;
2346
2347 return false;
2348 }
2349
2350 /*
2351 * Enter "forced psm". Make sure the sta is in psm against the ap,
2352 * to make the fw switch a bit more disconnection-persistent.
2353 */
2354 static void wl12xx_force_active_psm(struct wl1271 *wl)
2355 {
2356 struct wl12xx_vif *wlvif;
2357
2358 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2359 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2360 }
2361 }
2362
/* State shared with wlcore_hw_queue_iter() while scanning active vifs. */
struct wlcore_hw_queue_iter_data {
	/* one bit per hw-queue base already claimed by an active vif */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2370
2371 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2372 struct ieee80211_vif *vif)
2373 {
2374 struct wlcore_hw_queue_iter_data *iter_data = data;
2375
2376 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2377 return;
2378
2379 if (iter_data->cur_running || vif == iter_data->vif) {
2380 iter_data->cur_running = true;
2381 return;
2382 }
2383
2384 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2385 }
2386
/*
 * Allocate a block of NUM_TX_QUEUES hardware queues for a new vif and
 * register them with mac80211 (vif->hw_queue[] / vif->cab_queue).
 * On resume/recovery the vif already carries a valid queue base, which
 * is reused instead of allocating a new one.
 *
 * Returns 0 on success, -EBUSY when all queue bases are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	/* pick the first queue base not claimed by any other vif */
	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2437
/*
 * mac80211 add_interface callback.  Initializes the per-vif data,
 * allocates a role and hw queues, boots the firmware if this is the
 * first interface, and may trigger an intended fw switch (recovery)
 * when moving between single-role and multi-role firmware.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	/* count vifs before taking the mutex (uses mac80211 iteration) */
	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/* switch fw image (single-role <-> multi-role) via a recovery */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		/* run the recovery synchronously; it re-adds this vif */
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	/* only now is the vif visible to the rest of the driver */
	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2539
/*
 * Tear down a vif: abort any scan/ROC it owns, disable its firmware
 * roles (unless a recovery is in progress), free its rate policies and
 * keys, unlink it from wl->wlvif_list and cancel its pending work.
 *
 * Called with wl->mutex held; the mutex is temporarily dropped at the
 * end to cancel the vif's work items, then re-taken before returning.
 * NOTE(review): the reset_tx_queues parameter is not used in this body —
 * presumably historical; confirm against callers before removing.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		/* report the scan as aborted to mac80211 */
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	mutex_unlock(&wl->mutex);

	/* must run unlocked: the work items themselves take wl->mutex */
	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2676
/*
 * mac80211 remove_interface callback.  Removes the vif only if it is
 * still tracked in wl->wlvif_list, then switches firmware image via a
 * queued recovery if the remaining vif count requires it.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* the vif passed the INITIALIZED check, so it must have been found */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2712
/*
 * mac80211 change_interface callback, implemented as remove + add.
 * The VIF_CHANGE_IN_PROGRESS flag brackets the sequence so that
 * wl12xx_need_fw_change() does not trigger a firmware switch for the
 * intermediate vif counts.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2730
/*
 * Issue the firmware JOIN for a station or IBSS vif by starting the
 * corresponding role.  Includes a wl12xx-specific start/stop workaround
 * for firmware affected by WLCORE_QUIRK_START_STA_FAILS.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
2771
2772 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2773 int offset)
2774 {
2775 u8 ssid_len;
2776 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2777 skb->len - offset);
2778
2779 if (!ptr) {
2780 wl1271_error("No SSID in IEs!");
2781 return -ENOENT;
2782 }
2783
2784 ssid_len = ptr[1];
2785 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2786 wl1271_error("SSID is too long!");
2787 return -EINVAL;
2788 }
2789
2790 wlvif->ssid_len = ssid_len;
2791 memcpy(wlvif->ssid, ptr+2, ssid_len);
2792 return 0;
2793 }
2794
/*
 * Refresh the cached SSID for a station vif from the probe request
 * template mac80211 built for its AP.  Returns 0 on success or -EINVAL
 * when the vif is not a station or no template is available.
 */
static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	int ieoffset;

	/* we currently only support setting the ssid from the ap probe req */
	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
		return -EINVAL;

	skb = ieee80211_ap_probereq_get(wl->hw, vif);
	if (!skb)
		return -EINVAL;

	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	/* best-effort: a missing SSID IE is not treated as fatal here */
	wl1271_ssid_set(wlvif, skb, ieoffset);
	dev_kfree_skb(skb);

	return 0;
}
2816
/*
 * Configure the firmware for an established station association:
 * ps-poll/keep-alive templates, connection monitoring, AID, power-save
 * mode and (optionally) the peer's supported rate set.
 *
 * Returns 0 on success or a negative error code from the first failed
 * firmware command.
 */
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	int ieoffset;
	int ret;

	wlvif->aid = bss_conf->aid;
	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
	wlvif->beacon_int = bss_conf->beacon_int;
	wlvif->wmm_enabled = bss_conf->qos;

	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	/*
	 * Get a template for hardware connection maintenance
	 */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
							wlvif,
							NULL);
	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	/*
	 * NOTE(review): probereq can be NULL if template building failed
	 * and is passed to wl1271_ssid_set() unchecked — verify that the
	 * callee tolerates a NULL skb.
	 */
	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);

	/* enable the connection monitoring feature */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
	if (ret < 0)
		return ret;

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   wlvif->sta.klv_template_id,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
	if (ret < 0)
		return ret;

	if (sta_rate_set) {
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						    sta_rate_set,
						    wlvif->band);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	return ret;
}
2901
/*
 * Undo the association-time firmware configuration: drop the AID and
 * probe-request template, disable connection monitoring/keep-alive, and
 * abort any in-progress channel switch.
 *
 * NOTE(review): the early exits use "return false" (i.e. 0) from an
 * int-returning function, and the sta/ibss flag tests are asymmetric
 * (negated for sta, not for ibss) — this matches the code as shipped;
 * confirm intent before "fixing" either.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2951
2952 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2953 {
2954 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2955 wlvif->rate_set = wlvif->basic_rate_set;
2956 }
2957
2958 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2959 bool idle)
2960 {
2961 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2962
2963 if (idle == cur_idle)
2964 return;
2965
2966 if (idle) {
2967 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2968 } else {
2969 /* The current firmware only supports sched_scan in idle */
2970 if (wl->sched_vif == wlvif)
2971 wl->ops->sched_scan_stop(wl, wlvif);
2972
2973 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2974 }
2975 }
2976
2977 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2978 struct ieee80211_conf *conf, u32 changed)
2979 {
2980 int ret;
2981
2982 if (conf->power_level != wlvif->power_level) {
2983 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2984 if (ret < 0)
2985 return ret;
2986
2987 wlvif->power_level = conf->power_level;
2988 }
2989
2990 return 0;
2991 }
2992
/*
 * mac80211 config callback.  Caches the global power level and fans the
 * change out to every vif (the driver treats most settings per-vif
 * while mac80211 configures them globally).
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* remember the power level even while the chip is off */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3034
/* Multicast filter snapshot passed from prepare_multicast to
 * configure_filter via the opaque u64 cookie. */
struct wl1271_filter_params {
	bool enabled;            /* false when the list exceeds the hw limit */
	int mc_list_length;      /* number of valid entries in mc_list */
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3040
3041 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3042 struct netdev_hw_addr_list *mc_list)
3043 {
3044 struct wl1271_filter_params *fp;
3045 struct netdev_hw_addr *ha;
3046
3047 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3048 if (!fp) {
3049 wl1271_error("Out of memory setting filters.");
3050 return 0;
3051 }
3052
3053 /* update multicast filtering parameters */
3054 fp->mc_list_length = 0;
3055 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3056 fp->enabled = false;
3057 } else {
3058 fp->enabled = true;
3059 netdev_hw_addr_list_for_each(ha, mc_list) {
3060 memcpy(fp->mc_list[fp->mc_list_length],
3061 ha->addr, ETH_ALEN);
3062 fp->mc_list_length++;
3063 }
3064 }
3065
3066 return (u64)(unsigned long)fp;
3067 }
3068
/* rx filter flags mac80211 may request that this driver acknowledges */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3075
/*
 * mac80211 configure_filter callback.  Applies the multicast group
 * table prepared by wl1271_op_prepare_multicast() to every non-AP vif
 * and frees the filter cookie.  The firmware has no generic filter API;
 * other filter bits are implied by role/ROC state.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			/* FIF_ALLMULTI disables the group table entirely */
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			/*
			 * when neither branch runs (fp == NULL, allocation
			 * failed), ret still holds the elp_wakeup result
			 * (>= 0 here), so this check is well-defined
			 */
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	/* always consume the cookie from prepare_multicast */
	kfree(fp);
}
3130
3131 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3132 u8 id, u8 key_type, u8 key_size,
3133 const u8 *key, u8 hlid, u32 tx_seq_32,
3134 u16 tx_seq_16)
3135 {
3136 struct wl1271_ap_key *ap_key;
3137 int i;
3138
3139 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3140
3141 if (key_size > MAX_KEY_SIZE)
3142 return -EINVAL;
3143
3144 /*
3145 * Find next free entry in ap_keys. Also check we are not replacing
3146 * an existing key.
3147 */
3148 for (i = 0; i < MAX_NUM_KEYS; i++) {
3149 if (wlvif->ap.recorded_keys[i] == NULL)
3150 break;
3151
3152 if (wlvif->ap.recorded_keys[i]->id == id) {
3153 wl1271_warning("trying to record key replacement");
3154 return -EINVAL;
3155 }
3156 }
3157
3158 if (i == MAX_NUM_KEYS)
3159 return -EBUSY;
3160
3161 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3162 if (!ap_key)
3163 return -ENOMEM;
3164
3165 ap_key->id = id;
3166 ap_key->key_type = key_type;
3167 ap_key->key_size = key_size;
3168 memcpy(ap_key->key, key, key_size);
3169 ap_key->hlid = hlid;
3170 ap_key->tx_seq_32 = tx_seq_32;
3171 ap_key->tx_seq_16 = tx_seq_16;
3172
3173 wlvif->ap.recorded_keys[i] = ap_key;
3174 return 0;
3175 }
3176
3177 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3178 {
3179 int i;
3180
3181 for (i = 0; i < MAX_NUM_KEYS; i++) {
3182 kfree(wlvif->ap.recorded_keys[i]);
3183 wlvif->ap.recorded_keys[i] = NULL;
3184 }
3185 }
3186
/*
 * Replay all keys recorded by wl1271_record_ap_key() into the firmware
 * once the AP role has started, and set the default WEP key if any WEP
 * key was installed.  The recorded keys are always freed on exit,
 * whether the replay succeeded or not.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		/* slots are filled front-to-back, so stop at the first hole */
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a peer go to the broadcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3226
/*
 * Install or remove a key in the firmware for either an AP or a
 * station vif.  For an AP that has not started yet, additions are
 * recorded for later replay and removals are silently accepted.
 * For stations, several removal cases are silently ignored because the
 * firmware clears those keys itself.
 *
 * Returns 0 on success (including the silently-ignored cases) or a
 * negative error code from the firmware command.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* pairwise keys target the peer's link; group keys the
		 * broadcast link */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3303
/*
 * mac80211 set_key callback.  For ciphers whose spare-block accounting
 * may change (GEM/TKIP), the tx queues are stopped and flushed first so
 * the next packets stay in sync with the firmware's accounting; the
 * queues are resumed on every exit path.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	/* dispatch to the chip-specific set_key implementation */
	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3347
/*
 * Generic set_key implementation shared by the chip-specific drivers
 * (exported).  Maps the mac80211 cipher to a firmware key type, derives
 * the tx sequence counter from the link's freed-packet count, and adds
 * or removes the key via wl1271_set_key().
 *
 * Returns 0 on success, -EOPNOTSUPP for unknown ciphers/commands, or a
 * negative error code from the firmware commands.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/* resolve the link this key belongs to (peer, bcast or sta link) */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	/* seed the key's tx sequence counter from the link's tx history */
	if (hlid != WL12XX_INVALID_LINK_ID) {
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		/* firmware generates the CCMP IV; mac80211 leaves room */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				 key_conf->keyidx, key_type,
				 key_conf->keylen, key_conf->key,
				 tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
3454
3455 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3456 struct ieee80211_vif *vif,
3457 int key_idx)
3458 {
3459 struct wl1271 *wl = hw->priv;
3460 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3461 int ret;
3462
3463 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3464 key_idx);
3465
3466 mutex_lock(&wl->mutex);
3467
3468 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3469 ret = -EAGAIN;
3470 goto out_unlock;
3471 }
3472
3473 ret = wl1271_ps_elp_wakeup(wl);
3474 if (ret < 0)
3475 goto out_unlock;
3476
3477 wlvif->default_key = key_idx;
3478
3479 /* the default WEP key needs to be configured at least once */
3480 if (wlvif->encryption_type == KEY_WEP) {
3481 ret = wl12xx_cmd_set_default_wep_key(wl,
3482 key_idx,
3483 wlvif->sta.hlid);
3484 if (ret < 0)
3485 goto out_sleep;
3486 }
3487
3488 out_sleep:
3489 wl1271_ps_elp_sleep(wl);
3490
3491 out_unlock:
3492 mutex_unlock(&wl->mutex);
3493 }
3494
/*
 * Push the current regulatory-domain configuration to the firmware.
 * No-op unless the hardware requires host-driven regdomain updates
 * (WLCORE_QUIRK_REGDOMAIN_CONF quirk set).
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		/* NOTE(review): this error path skips wl1271_ps_elp_sleep()
		 * -- presumably fine because recovery resets the chip, but
		 * worth confirming */
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3521
/*
 * mac80211 hw_scan callback: start a one-shot hardware scan.
 * Only the first requested SSID is passed down; scanning is refused
 * while any role is on a remain-on-channel.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	/* only the first SSID of the request is used by the firmware scan */
	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len  = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3569
/*
 * mac80211 cancel_hw_scan callback: abort an in-progress hardware scan,
 * reset the driver scan state and report completion (aborted=true) to
 * mac80211.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* only issue a stop command if the firmware hasn't finished yet */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	/* must run outside wl->mutex: the work itself takes the mutex */
	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3616
3617 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3618 struct ieee80211_vif *vif,
3619 struct cfg80211_sched_scan_request *req,
3620 struct ieee80211_sched_scan_ies *ies)
3621 {
3622 struct wl1271 *wl = hw->priv;
3623 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3624 int ret;
3625
3626 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3627
3628 mutex_lock(&wl->mutex);
3629
3630 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3631 ret = -EAGAIN;
3632 goto out;
3633 }
3634
3635 ret = wl1271_ps_elp_wakeup(wl);
3636 if (ret < 0)
3637 goto out;
3638
3639 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3640 if (ret < 0)
3641 goto out_sleep;
3642
3643 wl->sched_vif = wlvif;
3644
3645 out_sleep:
3646 wl1271_ps_elp_sleep(wl);
3647 out:
3648 mutex_unlock(&wl->mutex);
3649 return ret;
3650 }
3651
/*
 * mac80211 sched_scan_stop callback: ask the chip-specific engine to
 * stop the periodic scan.
 */
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* NOTE(review): sched_scan_stop's return value is ignored and
	 * wl->sched_vif is not cleared here -- presumably handled on the
	 * completion path; confirm against the scan code */
	wl->ops->sched_scan_stop(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3676
3677 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3678 {
3679 struct wl1271 *wl = hw->priv;
3680 int ret = 0;
3681
3682 mutex_lock(&wl->mutex);
3683
3684 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3685 ret = -EAGAIN;
3686 goto out;
3687 }
3688
3689 ret = wl1271_ps_elp_wakeup(wl);
3690 if (ret < 0)
3691 goto out;
3692
3693 ret = wl1271_acx_frag_threshold(wl, value);
3694 if (ret < 0)
3695 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3696
3697 wl1271_ps_elp_sleep(wl);
3698
3699 out:
3700 mutex_unlock(&wl->mutex);
3701
3702 return ret;
3703 }
3704
/*
 * mac80211 set_rts_threshold callback: program the RTS threshold on
 * every active vif.  Per-vif failures are logged but do not stop the
 * loop; the last vif's status is what ends up being returned.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3734
3735 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3736 {
3737 int len;
3738 const u8 *next, *end = skb->data + skb->len;
3739 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3740 skb->len - ieoffset);
3741 if (!ie)
3742 return;
3743 len = ie[1] + 2;
3744 next = ie + len;
3745 memmove(ie, next, end - next);
3746 skb_trim(skb, skb->len - len);
3747 }
3748
3749 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3750 unsigned int oui, u8 oui_type,
3751 int ieoffset)
3752 {
3753 int len;
3754 const u8 *next, *end = skb->data + skb->len;
3755 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3756 skb->data + ieoffset,
3757 skb->len - ieoffset);
3758 if (!ie)
3759 return;
3760 len = ie[1] + 2;
3761 next = ie + len;
3762 memmove(ie, next, end - next);
3763 skb_trim(skb, skb->len - len);
3764 }
3765
3766 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3767 struct ieee80211_vif *vif)
3768 {
3769 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 struct sk_buff *skb;
3771 int ret;
3772
3773 skb = ieee80211_proberesp_get(wl->hw, vif);
3774 if (!skb)
3775 return -EOPNOTSUPP;
3776
3777 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3778 CMD_TEMPL_AP_PROBE_RESPONSE,
3779 skb->data,
3780 skb->len, 0,
3781 rates);
3782 dev_kfree_skb(skb);
3783
3784 if (ret < 0)
3785 goto out;
3786
3787 wl1271_debug(DEBUG_AP, "probe response updated");
3788 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3789
3790 out:
3791 return ret;
3792 }
3793
/*
 * Program the AP probe-response template on firmware that lacks the
 * dedicated probe-response support.  When the vif carries a hidden
 * (zero-length) SSID, the real SSID from bss_conf is spliced into the
 * frame in place of the empty SSID element before uploading.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the (empty) SSID element in the source frame */
	ptr += (ptr[1] + 2);

	/* everything up to the SSID element is copied verbatim */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the remaining IEs after the (replaced) SSID element */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3851
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to the firmware.  Returns 0 or the first fatal error.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		/* NOTE(review): unlike the slot/CTS cases, the return value
		 * of wl1271_acx_set_preamble() is ignored here -- confirm
		 * whether that is intentional */
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
3894
3895 static int wlcore_set_beacon_template(struct wl1271 *wl,
3896 struct ieee80211_vif *vif,
3897 bool is_ap)
3898 {
3899 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3900 struct ieee80211_hdr *hdr;
3901 u32 min_rate;
3902 int ret;
3903 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3904 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3905 u16 tmpl_id;
3906
3907 if (!beacon) {
3908 ret = -EINVAL;
3909 goto out;
3910 }
3911
3912 wl1271_debug(DEBUG_MASTER, "beacon updated");
3913
3914 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3915 if (ret < 0) {
3916 dev_kfree_skb(beacon);
3917 goto out;
3918 }
3919 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3920 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3921 CMD_TEMPL_BEACON;
3922 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3923 beacon->data,
3924 beacon->len, 0,
3925 min_rate);
3926 if (ret < 0) {
3927 dev_kfree_skb(beacon);
3928 goto out;
3929 }
3930
3931 wlvif->wmm_enabled =
3932 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3933 WLAN_OUI_TYPE_MICROSOFT_WMM,
3934 beacon->data + ieoffset,
3935 beacon->len - ieoffset);
3936
3937 /*
3938 * In case we already have a probe-resp beacon set explicitly
3939 * by usermode, don't use the beacon data.
3940 */
3941 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3942 goto end_bcn;
3943
3944 /* remove TIM ie from probe response */
3945 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3946
3947 /*
3948 * remove p2p ie from probe response.
3949 * the fw reponds to probe requests that don't include
3950 * the p2p ie. probe requests with p2p ie will be passed,
3951 * and will be responded by the supplicant (the spec
3952 * forbids including the p2p ie when responding to probe
3953 * requests that didn't include it).
3954 */
3955 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3956 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3957
3958 hdr = (struct ieee80211_hdr *) beacon->data;
3959 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3960 IEEE80211_STYPE_PROBE_RESP);
3961 if (is_ap)
3962 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3963 beacon->data,
3964 beacon->len,
3965 min_rate);
3966 else
3967 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3968 CMD_TEMPL_PROBE_RESPONSE,
3969 beacon->data,
3970 beacon->len, 0,
3971 min_rate);
3972 end_bcn:
3973 dev_kfree_skb(beacon);
3974 if (ret < 0)
3975 goto out;
3976
3977 out:
3978 return ret;
3979 }
3980
/*
 * Handle beacon-related bss_conf changes: beacon interval, an
 * explicitly-set AP probe response, and the beacon template itself.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		/* NOTE(review): return value intentionally (?) ignored --
		 * a failed probe-resp update does not fail the whole call */
		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4014
/* AP mode changes */
/*
 * Apply bss_conf changes for an AP-role vif: basic rates (and the
 * templates that depend on them), beacon parameters, starting/stopping
 * the AP role, ERP settings, and HT operation mode.  Errors abort the
 * remaining steps but are not reported to the caller (void).
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		/* rate change invalidates all templates; rebuild them */
		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4108
/*
 * A new BSSID was configured on a STA vif: cache beacon interval and
 * rate sets, stop any sched scan on this vif, push rate policies and
 * (qos-)null-data templates to the firmware and mark the vif in use.
 * Returns 0 or a negative error code.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* AP-advertised rates, when known, restrict our TX rate set */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4159
4160 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4161 {
4162 int ret;
4163
4164 /* revert back to minimum rates for the current band */
4165 wl1271_set_band_rate(wl, wlvif);
4166 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4167
4168 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4169 if (ret < 0)
4170 return ret;
4171
4172 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4173 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4174 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4175 if (ret < 0)
4176 return ret;
4177 }
4178
4179 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4180 return 0;
4181 }
/* STA/IBSS mode changes */
/*
 * Apply bss_conf changes for a STA or IBSS vif.  Handles (in order):
 * IBSS beacon updates, join/leave of an IBSS, idle state, CQM RSSI
 * thresholds, BSSID set/clear, basic/peer rate sets, ERP parameters,
 * the actual JOIN command, association state, power-save mode, HT peer
 * capabilities and ARP filtering.  The ordering of these steps matters
 * (e.g. HT and ARP configuration must happen after the join), so treat
 * reordering with care.  Errors abort the remaining steps (void).
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		/* snapshot the peer's rate/HT caps under RCU for use below */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4423
/*
 * mac80211 bss_info_changed callback: top-level dispatcher that takes
 * the mutex / wakes the chip, handles TX power, then delegates to the
 * AP or STA/IBSS specific handler.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* flush queued frames before the AP stops beaconing */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if ((changed & BSS_CHANGED_TXPOWER) &&
	    bss_conf->txpower != wlvif->power_level) {

		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
		if (ret < 0)
			goto out;

		wlvif->power_level = bss_conf->txpower;
	}

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4480
4481 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4482 struct ieee80211_chanctx_conf *ctx)
4483 {
4484 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4485 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4486 cfg80211_get_chandef_type(&ctx->def));
4487 return 0;
4488 }
4489
4490 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4491 struct ieee80211_chanctx_conf *ctx)
4492 {
4493 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4494 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4495 cfg80211_get_chandef_type(&ctx->def));
4496 }
4497
4498 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4499 struct ieee80211_chanctx_conf *ctx,
4500 u32 changed)
4501 {
4502 wl1271_debug(DEBUG_MAC80211,
4503 "mac80211 change chanctx %d (type %d) changed 0x%x",
4504 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4505 cfg80211_get_chandef_type(&ctx->def), changed);
4506 }
4507
/*
 * mac80211 assign_vif_chanctx callback: cache the channel context's
 * band/channel/type on the vif and refresh the band-dependent default
 * rates.  The firmware is programmed later (role start / join).
 */
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 assign chanctx (role %d) %d (type %d)",
		     wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));

	mutex_lock(&wl->mutex);

	wlvif->band = ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);

	/* update default rates according to the band */
	wl1271_set_band_rate(wl, wlvif);

	mutex_unlock(&wl->mutex);

	return 0;
}
4534
/*
 * mac80211 unassign_vif_chanctx callback: flush pending TX so no frame
 * is transmitted after the vif leaves its channel context.
 */
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 unassign chanctx (role %d) %d (type %d)",
		     wlvif->role_id,
		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
		     cfg80211_get_chandef_type(&ctx->def));

	wl1271_tx_flush(wl);
}
4550
/*
 * mac80211 conf_tx callback: program EDCA access-category parameters
 * (cw_min/cw_max/aifs/txop) and the TID configuration (including UAPSD
 * vs legacy power-save scheme) for one queue.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	/* vif not initialized yet: nothing to program, report success */
	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4600
4601 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4602 struct ieee80211_vif *vif)
4603 {
4604
4605 struct wl1271 *wl = hw->priv;
4606 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4607 u64 mactime = ULLONG_MAX;
4608 int ret;
4609
4610 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4611
4612 mutex_lock(&wl->mutex);
4613
4614 if (unlikely(wl->state != WLCORE_STATE_ON))
4615 goto out;
4616
4617 ret = wl1271_ps_elp_wakeup(wl);
4618 if (ret < 0)
4619 goto out;
4620
4621 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4622 if (ret < 0)
4623 goto out_sleep;
4624
4625 out_sleep:
4626 wl1271_ps_elp_sleep(wl);
4627
4628 out:
4629 mutex_unlock(&wl->mutex);
4630 return mactime;
4631 }
4632
4633 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4634 struct survey_info *survey)
4635 {
4636 struct ieee80211_conf *conf = &hw->conf;
4637
4638 if (idx != 0)
4639 return -ENOENT;
4640
4641 survey->channel = conf->chandef.chan;
4642 survey->filled = 0;
4643 return 0;
4644 }
4645
4646 static int wl1271_allocate_sta(struct wl1271 *wl,
4647 struct wl12xx_vif *wlvif,
4648 struct ieee80211_sta *sta)
4649 {
4650 struct wl1271_station *wl_sta;
4651 int ret;
4652
4653
4654 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4655 wl1271_warning("could not allocate HLID - too much stations");
4656 return -EBUSY;
4657 }
4658
4659 wl_sta = (struct wl1271_station *)sta->drv_priv;
4660 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4661 if (ret < 0) {
4662 wl1271_warning("could not allocate HLID - too many links");
4663 return -EBUSY;
4664 }
4665
4666 /* use the previous security seq, if this is a recovery/resume */
4667 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4668
4669 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4670 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4671 wl->active_sta_count++;
4672 return 0;
4673 }
4674
/*
 * Tear down the firmware link of an AP-mode station identified by hlid.
 * Clears the hlid from all per-vif/per-wl bitmaps, preserves the security
 * sequence counter in the station's drv_priv for recovery/resume, and
 * releases the link. NOTE(review): callers appear to hold wl->mutex
 * (all call sites in this file do) - confirm before adding new callers.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct wl1271_station *wl_sta;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	/* nothing to do if this hlid was never bound to a station */
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of ieee80211_sta,
	 * in case of recovery/suspend
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (sta) {
		wl_sta = (void *)sta->drv_priv;
		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

		/*
		 * increment the initial seq number on recovery to account for
		 * transmitted packets that we haven't yet got in the FW status
		 */
		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
			wl_sta->total_freed_pkts +=
					WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
	rcu_read_unlock();

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4718
4719 static int wl12xx_sta_add(struct wl1271 *wl,
4720 struct wl12xx_vif *wlvif,
4721 struct ieee80211_sta *sta)
4722 {
4723 struct wl1271_station *wl_sta;
4724 int ret = 0;
4725 u8 hlid;
4726
4727 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4728
4729 ret = wl1271_allocate_sta(wl, wlvif, sta);
4730 if (ret < 0)
4731 return ret;
4732
4733 wl_sta = (struct wl1271_station *)sta->drv_priv;
4734 hlid = wl_sta->hlid;
4735
4736 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4737 if (ret < 0)
4738 wl1271_free_sta(wl, wlvif, hlid);
4739
4740 return ret;
4741 }
4742
4743 static int wl12xx_sta_remove(struct wl1271 *wl,
4744 struct wl12xx_vif *wlvif,
4745 struct ieee80211_sta *sta)
4746 {
4747 struct wl1271_station *wl_sta;
4748 int ret = 0, id;
4749
4750 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4751
4752 wl_sta = (struct wl1271_station *)sta->drv_priv;
4753 id = wl_sta->hlid;
4754 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4755 return -EINVAL;
4756
4757 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4758 if (ret < 0)
4759 return ret;
4760
4761 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4762 return ret;
4763 }
4764
4765 static void wlcore_roc_if_possible(struct wl1271 *wl,
4766 struct wl12xx_vif *wlvif)
4767 {
4768 if (find_first_bit(wl->roc_map,
4769 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4770 return;
4771
4772 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4773 return;
4774
4775 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4776 }
4777
/*
 * Track stations that are in the middle of connecting (between add and
 * authorize) so a ROC is kept on the AP channel while any of them is
 * pending. When wl_sta is NULL, we treat this call as if coming from a
 * pending auth reply.
 * wl->mutex must be taken and the FW must be awake when the call
 * takes place.
 */
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct wl1271_station *wl_sta, bool in_conn)
{
	if (in_conn) {
		/* a station must not be marked in-connection twice */
		if (WARN_ON(wl_sta && wl_sta->in_connection))
			return;

		/* first pending connection on this vif: grab the ROC */
		if (!wlvif->ap_pending_auth_reply &&
		    !wlvif->inconn_count)
			wlcore_roc_if_possible(wl, wlvif);

		if (wl_sta) {
			wl_sta->in_connection = true;
			wlvif->inconn_count++;
		} else {
			wlvif->ap_pending_auth_reply = true;
		}
	} else {
		/* ignore a clear for a station that was never marked */
		if (wl_sta && !wl_sta->in_connection)
			return;

		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
			return;

		if (WARN_ON(wl_sta && !wlvif->inconn_count))
			return;

		if (wl_sta) {
			wl_sta->in_connection = false;
			wlvif->inconn_count--;
		} else {
			wlvif->ap_pending_auth_reply = false;
		}

		/* last pending connection gone: release the ROC if we hold it */
		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
		    test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}
}
4823
/*
 * React to mac80211 station state transitions for both AP and STA roles:
 * add/remove peers, push authorization to the FW, and manage the ROC that
 * covers the connection setup window. Called with wl->mutex held and the
 * FW awake (see wl12xx_op_sta_state).
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* keep a ROC while the station is connecting */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* connection setup finished: drop the in-connection ROC */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* de-authorize on the way back down from AUTHORIZED */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* STA role starts connecting: ROC on the target channel */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
4909
/*
 * mac80211 sta_state callback: wake the FW, apply the transition via
 * wl12xx_update_sta_state(), then go back to sleep.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/*
	 * errors on downward transitions are swallowed: mac80211 must be
	 * able to tear a station down even if the FW call failed
	 */
	if (new_state < old_state)
		return 0;
	return ret;
}
4943
/*
 * mac80211 ampdu_action callback. Only RX BA sessions are driven from
 * here (start/stop against the FW, tracked in the per-link ba_bitmap and
 * the global ba_rx_session_count); TX BA is handled by the FW itself.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the FW link the BA session lives on */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		/* refuse to double-start a session on an active tid */
		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * TX (initiator) BA sessions are managed autonomously by the FW;
	 * all TX AMPDU actions deliberately fall through to a single
	 * rejection here.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5063
/*
 * mac80211 set_bitrate_mask callback: translate the per-band legacy rate
 * masks into the driver's rate representation and, for an unassociated
 * STA vif, push the updated rate policies to the FW immediately.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* always cache the masks; they are applied on (re)association */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5106
/*
 * mac80211 channel_switch callback: hand the CSA parameters to the
 * chip-specific op for every STA vif, and arm a watchdog that reports
 * failure if the switch has not completed well after the CSA countdown.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	/* chip is down: report the switch as failed on every STA vif */
	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5158
5159 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5160 {
5161 struct wl1271 *wl = hw->priv;
5162
5163 wl1271_tx_flush(wl);
5164 }
5165
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work to end the ROC after
 * the requested duration. Only one ROC may be active at a time.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the owning vif; roc_complete_work ends the ROC */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5211
5212 static int __wlcore_roc_completed(struct wl1271 *wl)
5213 {
5214 struct wl12xx_vif *wlvif;
5215 int ret;
5216
5217 /* already completed */
5218 if (unlikely(!wl->roc_vif))
5219 return 0;
5220
5221 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5222
5223 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5224 return -EBUSY;
5225
5226 ret = wl12xx_stop_dev(wl, wlvif);
5227 if (ret < 0)
5228 return ret;
5229
5230 wl->roc_vif = NULL;
5231
5232 return 0;
5233 }
5234
5235 static int wlcore_roc_completed(struct wl1271 *wl)
5236 {
5237 int ret;
5238
5239 wl1271_debug(DEBUG_MAC80211, "roc complete");
5240
5241 mutex_lock(&wl->mutex);
5242
5243 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5244 ret = -EBUSY;
5245 goto out;
5246 }
5247
5248 ret = wl1271_ps_elp_wakeup(wl);
5249 if (ret < 0)
5250 goto out;
5251
5252 ret = __wlcore_roc_completed(wl);
5253
5254 wl1271_ps_elp_sleep(wl);
5255 out:
5256 mutex_unlock(&wl->mutex);
5257
5258 return ret;
5259 }
5260
5261 static void wlcore_roc_complete_work(struct work_struct *work)
5262 {
5263 struct delayed_work *dwork;
5264 struct wl1271 *wl;
5265 int ret;
5266
5267 dwork = container_of(work, struct delayed_work, work);
5268 wl = container_of(dwork, struct wl1271, roc_complete_work);
5269
5270 ret = wlcore_roc_completed(wl);
5271 if (!ret)
5272 ieee80211_remain_on_channel_expired(wl->hw);
5273 }
5274
5275 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5276 {
5277 struct wl1271 *wl = hw->priv;
5278
5279 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5280
5281 /* TODO: per-vif */
5282 wl1271_tx_flush(wl);
5283
5284 /*
5285 * we can't just flush_work here, because it might deadlock
5286 * (as we might get called from the same workqueue)
5287 */
5288 cancel_delayed_work_sync(&wl->roc_complete_work);
5289 wlcore_roc_completed(wl);
5290
5291 return 0;
5292 }
5293
5294 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5295 struct ieee80211_vif *vif,
5296 struct ieee80211_sta *sta,
5297 u32 changed)
5298 {
5299 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5300 struct wl1271 *wl = hw->priv;
5301
5302 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5303 }
5304
5305 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5306 struct ieee80211_vif *vif,
5307 struct ieee80211_sta *sta,
5308 s8 *rssi_dbm)
5309 {
5310 struct wl1271 *wl = hw->priv;
5311 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5312 int ret = 0;
5313
5314 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5315
5316 mutex_lock(&wl->mutex);
5317
5318 if (unlikely(wl->state != WLCORE_STATE_ON))
5319 goto out;
5320
5321 ret = wl1271_ps_elp_wakeup(wl);
5322 if (ret < 0)
5323 goto out_sleep;
5324
5325 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5326 if (ret < 0)
5327 goto out_sleep;
5328
5329 out_sleep:
5330 wl1271_ps_elp_sleep(wl);
5331
5332 out:
5333 mutex_unlock(&wl->mutex);
5334
5335 return ret;
5336 }
5337
5338 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5339 {
5340 struct wl1271 *wl = hw->priv;
5341 bool ret = false;
5342
5343 mutex_lock(&wl->mutex);
5344
5345 if (unlikely(wl->state != WLCORE_STATE_ON))
5346 goto out;
5347
5348 /* packets are considered pending if in the TX queue or the FW */
5349 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5350 out:
5351 mutex_unlock(&wl->mutex);
5352
5353 return ret;
5354 }
5355
/*
 * 2.4 GHz legacy rate table (bitrate in 100 kbps units), mapped to the
 * FW CONF_HW_BIT_RATE_* values. 2/5.5/11 Mbps also support short preamble.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5398
/*
 * 2.4 GHz channels 1-14; tx power is capped at WLCORE_MAX_TXPWR.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5416
/*
 * 2.4 GHz band descriptor built from the tables above.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5424
/*
 * 5 GHz data rates for WL1273 - OFDM-only subset of the 2.4 GHz table
 * (no 1/2/5.5/11 Mbps entries).
 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	 .hw_value = CONF_HW_BIT_RATE_48MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	 .hw_value = CONF_HW_BIT_RATE_54MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5452
/* 5 GHz band channels for WL1273; tx power capped at WLCORE_MAX_TXPWR */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5487
/* 5 GHz band descriptor built from the tables above */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5494
/* mac80211 callback table for the wlcore family */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5538
5539
5540 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5541 {
5542 u8 idx;
5543
5544 BUG_ON(band >= 2);
5545
5546 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5547 wl1271_error("Illegal RX rate from HW: %d", rate);
5548 return 0;
5549 }
5550
5551 idx = wl->band_rate_to_idx[band][rate];
5552 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5553 wl1271_error("Unsupported RX rate from HW: %d", rate);
5554 return 0;
5555 }
5556
5557 return idx;
5558 }
5559
/*
 * Derive up to WLCORE_NUM_MAC_ADDRESSES MAC addresses from a base
 * oui/nic pair (consecutive NIC values) and publish them to wiphy.
 * If the hardware provides fewer addresses than needed, the last slot is
 * filled with a locally-administered copy of the first address.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	/* OUI in bytes 0-2 (MSB first), NIC in bytes 3-5 */
	for (i = 0; i < wl->num_mac_addr; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 * NOTE(review): the locally-administered bit is conventionally
	 * bit 1 of the FIRST octet (addr[0]); this sets it on addr[2]
	 * (the low OUI byte) - confirm this is intentional.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[2] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
5598
5599 static int wl12xx_get_hw_info(struct wl1271 *wl)
5600 {
5601 int ret;
5602
5603 ret = wl12xx_set_power_on(wl);
5604 if (ret < 0)
5605 return ret;
5606
5607 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5608 if (ret < 0)
5609 goto out;
5610
5611 wl->fuse_oui_addr = 0;
5612 wl->fuse_nic_addr = 0;
5613
5614 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5615 if (ret < 0)
5616 goto out;
5617
5618 if (wl->ops->get_mac)
5619 ret = wl->ops->get_mac(wl);
5620
5621 out:
5622 wl1271_power_off(wl);
5623 return ret;
5624 }
5625
/*
 * Register the device with mac80211 (idempotent). The base MAC address
 * comes from the NVS when present, otherwise from the fuse values read
 * in wl12xx_get_hw_info().
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		/* NVS stores the address scattered over bytes 3-6, 10-11 */
		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
5671
/*
 * Undo wl1271_register_hw(): leave PLT (production-line testing) mode
 * if it is active, then unregister the hw from mac80211.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	/* PLT must be stopped before mac80211 unregistration */
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
5681
/*
 * Per-type interface limits: up to 3 station interfaces, and at most
 * one AP / P2P-GO / P2P-client interface.
 */
static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			 BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};

/*
 * Allowed interface combinations advertised to cfg80211.  Deliberately
 * not const: num_different_channels is filled in at runtime from
 * wl->num_channels in wl1271_init_ieee80211().
 */
static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
	{
		.max_interfaces = 3,
		.limits = wlcore_iface_limits,
		.n_limits = ARRAY_SIZE(wlcore_iface_limits),
	},
};
5703
/*
 * Fill in the mac80211/cfg80211 capability fields of wl->hw before
 * registration: hw flags, cipher suites, supported interface modes and
 * combinations, scan limits, band/channel data and queue layout.
 *
 * Must be called before wl1271_register_hw().  Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	/* cipher suites supported in hw; GEM is a TI-specific suite */
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra headroom for the TKIP security header */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_SUPPORTS_PS |
			IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
			IEEE80211_HW_SUPPORTS_UAPSD |
			IEEE80211_HW_HAS_RATE_CONTROL |
			IEEE80211_HW_CONNECTION_MONITOR |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
			IEEE80211_HW_SPECTRUM_MGMT |
			IEEE80211_HW_AP_LINK_PS |
			IEEE80211_HW_AMPDU_AGGREGATION |
			IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
			IEEE80211_HW_QUEUE_CONTROL;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 5000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
	wl->hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(wlcore_iface_combinations);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5838
/*
 * wlcore_alloc_hw - allocate and initialize the shared wlcore state
 * @priv_size:     size of the lower-driver private area (wl->priv)
 * @aggr_buf_size: size of the TX/RX aggregation buffer to allocate
 * @mbox_size:     size of the event mailbox buffer
 *
 * Allocates the ieee80211_hw (whose priv area holds struct wl1271),
 * the lower-driver priv, workqueue, aggregation buffer, dummy packet,
 * FW log page and event mailbox, and initializes all software state
 * (queues, work items, locks, defaults).
 *
 * Returns the new ieee80211_hw on success, or an ERR_PTR() on failure;
 * on failure everything allocated so far is released via the unwinding
 * goto ladder at the bottom.  Ownership of the returned hw passes to
 * the caller, who releases it with wlcore_free_hw().
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);

	/* the hw priv area holds struct wl1271 itself ... */
	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	/* ... while the lower driver's state is a separate allocation */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WL12XX_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	/* software-state defaults */
	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * NOTE: num_tx_desc is still 0 here (it is set by the lower
	 * driver's setup op), so this loop is effectively a no-op at
	 * this point; tx_frames is already zeroed by the memset above.
	 */
	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	/* aggregation buffer is page-allocated; size rounded up to pages */
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* event mailbox must be DMA-able for the bus drivers */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

	/* error unwinding: release in reverse order of allocation */
err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5989
/*
 * wlcore_free_hw - release everything allocated by wlcore_alloc_hw()
 *
 * Wakes any blocked fwlog readers first (fwlog_size = -1 acts as the
 * "going away" signal), then frees buffers, firmware, NVS data, the
 * workqueue, the lower-driver priv and finally the ieee80211_hw
 * itself.  After this call, @wl is no longer valid.
 *
 * Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware image is vmalloc'ed (see wl12xx_fetch_firmware) */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->fw_status_1);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
6024
#ifdef CONFIG_PM
/*
 * Wake-on-WLAN capabilities advertised to cfg80211; hooked up in
 * wlcore_nvs_cb() only when the platform keeps power in suspend.
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6033
/*
 * Hard-IRQ half of the interrupt handler, installed only for
 * edge-triggered platforms (WL12XX_PLATFORM_QUIRK_EDGE_IRQ): do no
 * work here, just wake the threaded handler (wlcore_irq).
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6038
6039 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6040 {
6041 struct wl1271 *wl = context;
6042 struct platform_device *pdev = wl->pdev;
6043 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6044 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6045 unsigned long irqflags;
6046 int ret;
6047 irq_handler_t hardirq_fn = NULL;
6048
6049 if (fw) {
6050 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6051 if (!wl->nvs) {
6052 wl1271_error("Could not allocate nvs data");
6053 goto out;
6054 }
6055 wl->nvs_len = fw->size;
6056 } else {
6057 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6058 WL12XX_NVS_NAME);
6059 wl->nvs = NULL;
6060 wl->nvs_len = 0;
6061 }
6062
6063 ret = wl->ops->setup(wl);
6064 if (ret < 0)
6065 goto out_free_nvs;
6066
6067 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6068
6069 /* adjust some runtime configuration parameters */
6070 wlcore_adjust_conf(wl);
6071
6072 wl->irq = platform_get_irq(pdev, 0);
6073 wl->platform_quirks = pdata->platform_quirks;
6074 wl->if_ops = pdev_data->if_ops;
6075
6076 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6077 irqflags = IRQF_TRIGGER_RISING;
6078 hardirq_fn = wlcore_hardirq;
6079 } else {
6080 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6081 }
6082
6083 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6084 irqflags, pdev->name, wl);
6085 if (ret < 0) {
6086 wl1271_error("request_irq() failed: %d", ret);
6087 goto out_free_nvs;
6088 }
6089
6090 #ifdef CONFIG_PM
6091 ret = enable_irq_wake(wl->irq);
6092 if (!ret) {
6093 wl->irq_wake_enabled = true;
6094 device_init_wakeup(wl->dev, 1);
6095 if (pdata->pwr_in_suspend)
6096 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6097 }
6098 #endif
6099 disable_irq(wl->irq);
6100
6101 ret = wl12xx_get_hw_info(wl);
6102 if (ret < 0) {
6103 wl1271_error("couldn't get hw info");
6104 goto out_irq;
6105 }
6106
6107 ret = wl->ops->identify_chip(wl);
6108 if (ret < 0)
6109 goto out_irq;
6110
6111 ret = wl1271_init_ieee80211(wl);
6112 if (ret)
6113 goto out_irq;
6114
6115 ret = wl1271_register_hw(wl);
6116 if (ret)
6117 goto out_irq;
6118
6119 ret = wlcore_sysfs_init(wl);
6120 if (ret)
6121 goto out_unreg;
6122
6123 wl->initialized = true;
6124 goto out;
6125
6126 out_unreg:
6127 wl1271_unregister_hw(wl);
6128
6129 out_irq:
6130 free_irq(wl->irq, wl);
6131
6132 out_free_nvs:
6133 kfree(wl->nvs);
6134
6135 out:
6136 release_firmware(fw);
6137 complete_all(&wl->nvs_loading_complete);
6138 }
6139
6140 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6141 {
6142 int ret;
6143
6144 if (!wl->ops || !wl->ptable)
6145 return -EINVAL;
6146
6147 wl->dev = &pdev->dev;
6148 wl->pdev = pdev;
6149 platform_set_drvdata(pdev, wl);
6150
6151 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6152 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6153 wl, wlcore_nvs_cb);
6154 if (ret < 0) {
6155 wl1271_error("request_firmware_nowait failed: %d", ret);
6156 complete_all(&wl->nvs_loading_complete);
6157 }
6158
6159 return ret;
6160 }
6161 EXPORT_SYMBOL_GPL(wlcore_probe);
6162
/*
 * wlcore_remove - common remove entry point for the wl12xx/wl18xx
 * platform drivers.
 *
 * Waits for the asynchronous NVS callback to finish before tearing
 * anything down; if initialization never completed there is nothing
 * to undo.  Always returns 0.
 */
int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	/* wlcore_nvs_cb() may still be running; wait for it */
	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
6182
/* runtime-tunable debug bitmask, shared with the lower drivers */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* perm 0: load-time only, not visible under /sys/module */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* -1 (the default) means "use the value from the conf structure" */
module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);
This page took 0.222241 seconds and 6 git commands to generate.