wlcore: wakeup from ELP before starting recovery
[deliverable/linux.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wlcore
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
30
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
/* number of times to retry booting the firmware before giving up */
#define WL1271_BOOT_RETRIES 3

/*
 * Optional module parameters; the -1 / NULL defaults mean "not set on the
 * command line", in which case the values from wl->conf are kept
 * (see wlcore_adjust_conf()).
 */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;

/* forward declarations for routines defined later in this file */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_supported_band *band;
82 struct ieee80211_channel *ch;
83 int i;
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
86
87 band = wiphy->bands[IEEE80211_BAND_5GHZ];
88 for (i = 0; i < band->n_channels; i++) {
89 ch = &band->channels[i];
90 if (ch->flags & IEEE80211_CHAN_DISABLED)
91 continue;
92
93 if (ch->flags & IEEE80211_CHAN_RADAR)
94 ch->flags |= IEEE80211_CHAN_NO_IBSS |
95 IEEE80211_CHAN_PASSIVE_SCAN;
96
97 }
98
99 wlcore_regdomain_config(wl);
100 }
101
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
103 bool enable)
104 {
105 int ret = 0;
106
107 /* we should hold wl->mutex */
108 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
109 if (ret < 0)
110 goto out;
111
112 if (enable)
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 else
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
116 out:
117 return ret;
118 }
119
120 /*
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
123 */
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
125 {
126 int ret = 0;
127 int period = wl->conf.rx_streaming.interval;
128
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
131 goto out;
132
133 /* reconfigure/disable according to new streaming_period */
134 if (period &&
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 (wl->conf.rx_streaming.always ||
137 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 else {
140 ret = wl1271_set_rx_streaming(wl, wlvif, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif->rx_streaming_timer);
143 }
144 out:
145 return ret;
146 }
147
/*
 * Work item: enable rx streaming in the fw (if still applicable) and arm
 * the inactivity timer that will later schedule the disable work.
 * Runs under wl->mutex; wakes the chip from ELP around the fw command.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* bail out if streaming already runs, we are not associated, or
	 * neither the always-on config nor soft-gemini requires it */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* a zero interval means rx streaming is disabled in conf */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
183
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
185 {
186 int ret;
187 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 rx_streaming_disable_work);
189 struct wl1271 *wl = wlvif->wl;
190
191 mutex_lock(&wl->mutex);
192
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
194 goto out;
195
196 ret = wl1271_ps_elp_wakeup(wl);
197 if (ret < 0)
198 goto out;
199
200 ret = wl1271_set_rx_streaming(wl, wlvif, false);
201 if (ret)
202 goto out_sleep;
203
204 out_sleep:
205 wl1271_ps_elp_sleep(wl);
206 out:
207 mutex_unlock(&wl->mutex);
208 }
209
210 static void wl1271_rx_streaming_timer(unsigned long data)
211 {
212 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 struct wl1271 *wl = wlvif->wl;
214 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 }
216
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 {
220 /* if the watchdog is not armed, don't do anything */
221 if (wl->tx_allocated_blocks == 0)
222 return;
223
224 cancel_delayed_work(&wl->tx_watchdog_work);
225 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 }
228
/*
 * Tx watchdog: fires when no tx blocks were freed by the fw for the
 * configured timeout.  Benign causes (ROC, scan, AP buffering for
 * sleeping stations) just re-arm the watchdog; otherwise tx is assumed
 * stuck in the fw and recovery is started.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	/* the chip may already be off or restarting */
	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			     wl->conf.tx.tx_watchdog_timeout,
			     wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/* no benign explanation: assume the fw is stuck and recover */
	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
290
291 static void wlcore_adjust_conf(struct wl1271 *wl)
292 {
293 /* Adjust settings according to optional module parameters */
294
295 /* Firmware Logger params */
296 if (fwlog_mem_blocks != -1) {
297 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
300 } else {
301 wl1271_error(
302 "Illegal fwlog_mem_blocks=%d using default %d",
303 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
304 }
305 }
306
307 if (fwlog_param) {
308 if (!strcmp(fwlog_param, "continuous")) {
309 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 } else if (!strcmp(fwlog_param, "ondemand")) {
311 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 } else if (!strcmp(fwlog_param, "dbgpins")) {
313 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 } else if (!strcmp(fwlog_param, "disable")) {
316 wl->conf.fwlog.mem_blocks = 0;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 } else {
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 }
321 }
322
323 if (bug_on_recovery != -1)
324 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325
326 if (no_recovery != -1)
327 wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
329
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 struct wl12xx_vif *wlvif,
332 u8 hlid, u8 tx_pkts)
333 {
334 bool fw_ps;
335
336 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
337
338 /*
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
341 */
342 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_end(wl, wlvif, hlid);
344
345 /*
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
353 */
354 else if (wl->active_link_count > 3 && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 struct wl12xx_vif *wlvif,
361 struct wl_fw_status_2 *status)
362 {
363 u32 cur_fw_ps_map;
364 u8 hlid;
365
366 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
367 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 wl1271_debug(DEBUG_PSM,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl->ap_fw_ps_map, cur_fw_ps_map,
371 wl->ap_fw_ps_map ^ cur_fw_ps_map);
372
373 wl->ap_fw_ps_map = cur_fw_ps_map;
374 }
375
376 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
377 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 wl->links[hlid].allocated_pkts);
379 }
380
/*
 * Read the fw status area over the bus and update the host-side tx
 * accounting: freed packets per queue and per link, freed blocks (with
 * 8-bit / 32-bit wrap-around handling), available tx blocks, the tx
 * watchdog, AP per-link PS state and the host-chipset time offset.
 * Returns 0 on success or a negative error from the bus read.
 */
static int wlcore_fw_status(struct wl1271 *wl,
			    struct wl_fw_status_1 *status_1,
			    struct wl_fw_status_2 *status_2)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	size_t status_len;
	int ret;
	struct wl1271_link *lnk;

	/* status_1 is variable-length (per-rx-descriptor) and is followed
	 * in memory by status_2 plus a chip-specific private area */
	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
		sizeof(*status_2) + wl->fw_status_priv_len;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
				   status_len, false);
	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status_1->intr,
		     status_1->fw_rx_counter,
		     status_1->drv_rx_counter,
		     status_1->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status_2->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status_2->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status_2->total_released_blks)))
		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		/* the 32-bit fw counter wrapped; account across the wrap */
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status_2->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status_2);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status_2->fw_localtime);

	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);

	return 0;
}
493
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 struct sk_buff *skb;
497
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
501
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
505 }
506
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 struct wl1271 *wl =
510 container_of(work, struct wl1271, netstack_work);
511
512 do {
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516
/* upper bound on status-read iterations per invocation */
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt servicing loop, called with wl->mutex held.  Repeatedly
 * reads the fw status and dispatches data/event interrupts until no
 * interrupt bits remain (or the loop limit is hit).  Returns a negative
 * error to request recovery; the caller queues the recovery work.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* the chip may be in ELP; wake it before any bus access */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = le32_to_cpu(wl->fw_status_1->intr);
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no pending interrupt bits - we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status_1);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	/* allow the chip to go back to ELP (skipped on the error paths) */
	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
647
/*
 * Threaded IRQ handler.  Completes a pending ELP wakeup, defers the work
 * while suspended, then services the interrupt under wl->mutex and
 * queues recovery on failure.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
695
/* accumulator for counting active interfaces (see wl12xx_get_vif_count) */
struct vif_counter_data {
	u8 counter;		/* number of active vifs seen so far */

	struct ieee80211_vif *cur_vif;		/* vif we are looking for */
	bool cur_vif_running;	/* set when cur_vif was found active */
};

/* iterator callback: count each active vif, flag if it is cur_vif */
static void wl12xx_vif_count_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct vif_counter_data *counter = data;

	counter->counter++;
	if (counter->cur_vif == vif)
		counter->cur_vif_running = true;
}
712
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
715 struct ieee80211_vif *cur_vif,
716 struct vif_counter_data *data)
717 {
718 memset(data, 0, sizeof(*data));
719 data->cur_vif = cur_vif;
720
721 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
722 wl12xx_vif_count_iter, data);
723 }
724
725 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
726 {
727 const struct firmware *fw;
728 const char *fw_name;
729 enum wl12xx_fw_type fw_type;
730 int ret;
731
732 if (plt) {
733 fw_type = WL12XX_FW_TYPE_PLT;
734 fw_name = wl->plt_fw_name;
735 } else {
736 /*
737 * we can't call wl12xx_get_vif_count() here because
738 * wl->mutex is taken, so use the cached last_vif_count value
739 */
740 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
741 fw_type = WL12XX_FW_TYPE_MULTI;
742 fw_name = wl->mr_fw_name;
743 } else {
744 fw_type = WL12XX_FW_TYPE_NORMAL;
745 fw_name = wl->sr_fw_name;
746 }
747 }
748
749 if (wl->fw_type == fw_type)
750 return 0;
751
752 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
753
754 ret = request_firmware(&fw, fw_name, wl->dev);
755
756 if (ret < 0) {
757 wl1271_error("could not get firmware %s: %d", fw_name, ret);
758 return ret;
759 }
760
761 if (fw->size % 4) {
762 wl1271_error("firmware size is not multiple of 32 bits: %zu",
763 fw->size);
764 ret = -EILSEQ;
765 goto out;
766 }
767
768 vfree(wl->fw);
769 wl->fw_type = WL12XX_FW_TYPE_NONE;
770 wl->fw_len = fw->size;
771 wl->fw = vmalloc(wl->fw_len);
772
773 if (!wl->fw) {
774 wl1271_error("could not allocate memory for the firmware");
775 ret = -ENOMEM;
776 goto out;
777 }
778
779 memcpy(wl->fw, fw->data, wl->fw_len);
780 ret = 0;
781 wl->fw_type = fw_type;
782 out:
783 release_firmware(fw);
784
785 return ret;
786 }
787
788 void wl12xx_queue_recovery_work(struct wl1271 *wl)
789 {
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
791
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 wl->state = WLCORE_STATE_RESTARTING;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
796 wl1271_ps_elp_wakeup(wl);
797 wlcore_disable_interrupts_nosync(wl);
798 ieee80211_queue_work(wl->hw, &wl->recovery_work);
799 }
800 }
801
802 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
803 {
804 size_t len;
805
806 /* Make sure we have enough room */
807 len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
808
809 /* Fill the FW log file, consumed by the sysfs fwlog entry */
810 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
811 wl->fwlog_size += len;
812
813 return len;
814 }
815
/*
 * On a fw panic, walk the fw logger's linked list of memory blocks and
 * copy the log into the host buffer exposed via sysfs, then wake any
 * reader waiting on fwlog_waitq.  Temporarily switches bus partitions
 * to address each block; the previous partition is always restored.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	struct wlcore_partition_set part, old_part;
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	/* nothing to read if the fw logger is not available/enabled */
	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
	if (ret < 0)
		goto out;

	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
	if (!addr)
		goto out;

	/* each block starts with the next-block pointer; in continuous
	 * mode an rx descriptor follows it before the payload */
	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = wl->fwlog_end;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	old_part = wl->curr_part;
	memset(&part, 0, sizeof(part));

	/* Traverse the memory blocks linked list */
	do {
		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
		part.mem.size  = PAGE_SIZE;

		ret = wlcore_set_partition(wl, &part);
		if (ret < 0) {
			wl1271_error("%s: set_partition start=0x%X size=%d",
				__func__, part.mem.start, part.mem.size);
			goto out;
		}

		memset(block, 0, wl->fw_mem_block_size);
		ret = wlcore_read_hwaddr(wl, addr, block,
					wl->fw_mem_block_size, false);

		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);

		/* stop early once the host log buffer is full */
		if (!wl12xx_copy_fwlog(wl, block + offset,
				       wl->fw_mem_block_size - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
	/* always restore the partition we entered with */
	wlcore_set_partition(wl, &old_part);
}
904
/*
 * Log diagnostic state (fw program counter and interrupt status) when a
 * hardware recovery starts.  Switches to the BOOT partition to read the
 * registers and restores the WORK partition before returning.
 */
static void wlcore_print_recovery(struct wl1271 *wl)
{
	u32 pc = 0;
	u32 hint_sts = 0;
	int ret;

	wl1271_info("Hardware recovery in progress. FW ver: %s",
		    wl->chip.fw_ver_str);

	/* change partitions momentarily so we can read the FW pc */
	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
	if (ret < 0)
		return;

	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
		    pc, hint_sts, ++wl->recovery_count);

	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
932
933
/*
 * Recovery work: dump fw panic state, tear down all interfaces, stop the
 * chip and ask mac80211 to restart the hardware.  Tx queues are stopped
 * for the duration to avoid spurious traffic during the restart.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	/* nothing to recover when already off or in PLT mode */
	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	/* only dump diagnostics for unintended (real) fw failures */
	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
			wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	/* optional debug aid: halt the kernel on an unintended recovery */
	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
986
987 static int wlcore_fw_wakeup(struct wl1271 *wl)
988 {
989 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
990 }
991
992 static int wl1271_setup(struct wl1271 *wl)
993 {
994 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
995 sizeof(*wl->fw_status_2) +
996 wl->fw_status_priv_len, GFP_KERNEL);
997 if (!wl->fw_status_1)
998 return -ENOMEM;
999
1000 wl->fw_status_2 = (struct wl_fw_status_2 *)
1001 (((u8 *) wl->fw_status_1) +
1002 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
1003
1004 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1005 if (!wl->tx_res_if) {
1006 kfree(wl->fw_status_1);
1007 return -ENOMEM;
1008 }
1009
1010 return 0;
1011 }
1012
/*
 * Power the chip on, reset/init the bus, select the BOOT partition and
 * wake the fw from ELP.  On failure after power-up the chip is powered
 * off again (the "fail" path); a power-on failure returns directly via
 * "out" since nothing was powered up.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle time before toggling power */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1041
/*
 * Full chip bring-up: power on, configure the bus block size, allocate
 * the status buffers and fetch the appropriate firmware image.
 * @plt selects the PLT (production line test) firmware.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1077
/*
 * Enter PLT (production line test) mode: boot the chip with the PLT
 * firmware, retrying up to WL1271_BOOT_RETRIES times.  Fails with
 * -EBUSY unless the chip is currently off.  In PLT_CHIP_AWAKE mode the
 * chip is only woken, without running plt_init.
 */
int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
{
	int retries = WL1271_BOOT_RETRIES;
	struct wiphy *wiphy = wl->hw->wiphy;

	/* human-readable names for the log, indexed by plt_mode */
	static const char* const PLT_MODE[] = {
		"PLT_OFF",
		"PLT_ON",
		"PLT_FEM_DETECT",
		"PLT_CHIP_AWAKE"
	};

	int ret;

	mutex_lock(&wl->mutex);

	wl1271_notice("power up");

	if (wl->state != WLCORE_STATE_OFF) {
		wl1271_error("cannot go into PLT state because not "
			     "in off state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	/* Indicate to lower levels that we are now in PLT mode */
	wl->plt = true;
	wl->plt_mode = plt_mode;

	while (retries) {
		retries--;
		ret = wl12xx_chip_wakeup(wl, true);
		if (ret < 0)
			goto power_off;

		/* PLT_CHIP_AWAKE only powers the chip; skip fw init */
		if (plt_mode != PLT_CHIP_AWAKE) {
			ret = wl->ops->plt_init(wl);
			if (ret < 0)
				goto power_off;
		}

		wl->state = WLCORE_STATE_ON;
		wl1271_notice("firmware booted in PLT mode %s (%s)",
			      PLT_MODE[plt_mode],
			      wl->chip.fw_ver_str);

		/* update hw/fw version info in wiphy struct */
		wiphy->hw_version = wl->chip.id;
		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
			sizeof(wiphy->fw_version));

		goto out;

power_off:
		/* power off before the next boot attempt */
		wl1271_power_off(wl);
	}

	/* all retries failed - leave PLT mode */
	wl->plt = false;
	wl->plt_mode = PLT_OFF;

	wl1271_error("firmware boot in PLT mode failed despite %d retries",
		     WL1271_BOOT_RETRIES);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
1145
/*
 * wl1271_plt_stop - leave PLT mode and power the chip down.
 *
 * Returns 0 on success or -EBUSY if the device is not currently in PLT
 * mode. All deferred work is flushed/cancelled before powering off.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* flush/cancel pending work without holding the mutex */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* power off and reset the software state under the mutex */
	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1196
/*
 * mac80211 TX entry point. Maps the skb to a FW link (hlid) and an AC
 * queue, appends it to the per-link queue and schedules tx_work when the
 * FW can accept frames. Runs in atomic context; all queue state is
 * protected by wl->wl_lock.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* frames without a vif cannot be mapped to a role - drop them */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1269
/*
 * Queue the pre-allocated dummy packet for transmission (the FW asks for
 * it when it is low on RX memory blocks). Only one dummy packet may be
 * pending at a time; duplicates are silently ignored.
 *
 * Returns 0, or the result of wlcore_tx_work_locked() when the packet is
 * pushed out immediately.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	/* flag + queue count are updated atomically under wl_lock */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1296
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512 bytes
 * boundaries could be beneficial, performance wise
 */
#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))

/*
 * Build the dummy packet skb: a NULL-function data frame (ToDS) followed
 * by zero padding, with headroom reserved for the TX HW descriptor so the
 * whole frame fits TOTAL_TX_DUMMY_PACKET_SIZE on the bus.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
{
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr *hdr;
	unsigned int dummy_packet_size;

	/* padding size = total size minus HW descriptor and 802.11 header */
	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);

	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
	if (!skb) {
		wl1271_warning("Failed to allocate a dummy packet skb");
		return NULL;
	}

	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));

	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_STYPE_NULLFUNC |
					 IEEE80211_FCTL_TODS);

	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);

	/* Dummy packets require the TID to be management */
	skb->priority = WL1271_TID_MGMT;

	/* Initialize all fields that might be used */
	skb_set_queue_mapping(skb, 0);
	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));

	return skb;
}
1338
1339
1340 #ifdef CONFIG_PM
/*
 * Validate that a single WoWLAN pattern can be expressed with the FW RX
 * filter limits: at most WL1271_RX_FILTER_MAX_FIELDS segments and a total
 * serialized size of at most WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 *
 * Returns 0 if the pattern fits, -EINVAL for a missing mask or too many
 * segments, -E2BIG if the serialized fields would be too large.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
	/* in_field tracks whether we are inside a run of masked-in bytes */
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new masked-in run */
				in_field = 1;
				pattern_len = 1;
			} else {
				/* split a run that crosses the eth header */
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			/* end of a run - account for the finished field */
			if (in_field) {
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* account for a run that extends to the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1402
1403 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404 {
1405 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1406 }
1407
1408 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1409 {
1410 int i;
1411
1412 if (filter == NULL)
1413 return;
1414
1415 for (i = 0; i < filter->num_fields; i++)
1416 kfree(filter->fields[i].pattern);
1417
1418 kfree(filter);
1419 }
1420
1421 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1422 u16 offset, u8 flags,
1423 u8 *pattern, u8 len)
1424 {
1425 struct wl12xx_rx_filter_field *field;
1426
1427 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1428 wl1271_warning("Max fields per RX filter. can't alloc another");
1429 return -EINVAL;
1430 }
1431
1432 field = &filter->fields[filter->num_fields];
1433
1434 field->pattern = kzalloc(len, GFP_KERNEL);
1435 if (!field->pattern) {
1436 wl1271_warning("Failed to allocate RX filter pattern");
1437 return -ENOMEM;
1438 }
1439
1440 filter->num_fields++;
1441
1442 field->offset = cpu_to_le16(offset);
1443 field->flags = flags;
1444 field->len = len;
1445 memcpy(field->pattern, pattern, len);
1446
1447 return 0;
1448 }
1449
1450 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1451 {
1452 int i, fields_size = 0;
1453
1454 for (i = 0; i < filter->num_fields; i++)
1455 fields_size += filter->fields[i].len +
1456 sizeof(struct wl12xx_rx_filter_field) -
1457 sizeof(u8 *);
1458
1459 return fields_size;
1460 }
1461
1462 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1463 u8 *buf)
1464 {
1465 int i;
1466 struct wl12xx_rx_filter_field *field;
1467
1468 for (i = 0; i < filter->num_fields; i++) {
1469 field = (struct wl12xx_rx_filter_field *)buf;
1470
1471 field->offset = filter->fields[i].offset;
1472 field->flags = filter->fields[i].flags;
1473 field->len = filter->fields[i].len;
1474
1475 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1476 buf += sizeof(struct wl12xx_rx_filter_field) -
1477 sizeof(u8 *) + field->len;
1478 }
1479 }
1480
1481 /*
1482 * Allocates an RX filter returned through f
1483 * which needs to be freed using rx_filter_free()
1484 */
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 *
 * Walks the pattern mask and creates one filter field per contiguous run
 * of masked-in bytes, splitting runs that cross the Ethernet/IP header
 * boundary (the FW addresses the two headers separately).
 * Returns 0 on success or a negative error; on error *f is set to NULL.
 */
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
					   struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip bytes that are masked out */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of the current run, stopping at the
		 * Ethernet header boundary if the run crosses it */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* express the offset relative to the proper header */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1548
/*
 * Program the FW RX filters from a WoWLAN configuration.
 *
 * With no patterns (or wow->any), all filters are cleared and the default
 * action is set to FILTER_SIGNAL (wake on everything relevant). Otherwise
 * every pattern is validated first, then translated into an enabled FW
 * filter, and the default action becomes FILTER_DROP so only matching
 * frames wake the host.
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the FW keeps its own copy; the local one can go */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* anything not matched by a filter is dropped while suspended */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1613
/*
 * Prepare a station-role vif for suspend: program the WoWLAN filters and,
 * if the suspend-specific wake-up parameters differ from the normal ones,
 * switch the FW wake-up conditions to the suspend values. A vif that is
 * not associated is left untouched.
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	/* chip must be awake before any ACX commands */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out_sleep;

	/* nothing more to do if suspend uses the normal wake parameters */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out_sleep;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	return ret;

}
1650
1651 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1652 struct wl12xx_vif *wlvif)
1653 {
1654 int ret = 0;
1655
1656 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1657 goto out;
1658
1659 ret = wl1271_ps_elp_wakeup(wl);
1660 if (ret < 0)
1661 goto out;
1662
1663 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1664
1665 wl1271_ps_elp_sleep(wl);
1666 out:
1667 return ret;
1668
1669 }
1670
1671 static int wl1271_configure_suspend(struct wl1271 *wl,
1672 struct wl12xx_vif *wlvif,
1673 struct cfg80211_wowlan *wow)
1674 {
1675 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1676 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1677 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1678 return wl1271_configure_suspend_ap(wl, wlvif);
1679 return 0;
1680 }
1681
/*
 * Undo the suspend-time configuration on resume: for STA roles, clear the
 * WoWLAN filters and restore the normal wake-up conditions (when they were
 * changed for suspend); for AP roles, disable beacon filtering again.
 * Errors are logged but not propagated - resume continues regardless.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* only STA and AP roles were configured for suspend */
	if ((!is_ap) && (!is_sta))
		return;

	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		/* NULL wow clears all RX filters */
		wl1271_configure_wowlan(wl, NULL);

		/* wake conditions were not changed if these matched */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			goto out_sleep;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
}
1722
/*
 * mac80211 suspend callback. Refuses to suspend while a recovery is
 * pending, flushes TX, configures every vif for suspend, and then flushes
 * the threaded IRQ path by toggling interrupts around setting the
 * SUSPENDED flag (which stops new irq work from being queued).
 *
 * Returns 0 on success, -EBUSY when recovery must run first, or the
 * per-vif configuration error.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			    struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	return 0;
}
1773
/*
 * mac80211 resume callback. Re-enables irq-work queueing, runs any irq
 * work that was postponed while suspended (unless a recovery is pending,
 * in which case the recovery is re-queued instead of touching the HW),
 * and restores the pre-suspend configuration of every vif.
 * Always returns 0.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		/* the recovery work was blocked by the SUSPENDED flag */
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

out:
	wl->wow_enabled = false;
	mutex_unlock(&wl->mutex);

	return 0;
}
1832 #endif
1833
1834 static int wl1271_op_start(struct ieee80211_hw *hw)
1835 {
1836 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1837
1838 /*
1839 * We have to delay the booting of the hardware because
1840 * we need to know the local MAC address before downloading and
1841 * initializing the firmware. The MAC address cannot be changed
1842 * after boot, and without the proper MAC address, the firmware
1843 * will not function properly.
1844 *
1845 * The MAC address is first known when the corresponding interface
1846 * is added. That is where we will initialize the hardware.
1847 */
1848
1849 return 0;
1850 }
1851
/*
 * Core teardown, called with wl->mutex held. Moves the device to
 * WLCORE_STATE_OFF, drains all deferred work (dropping and re-taking the
 * mutex to do so - see the inline comment), powers the chip off and
 * resets all software state so a subsequent add_interface starts clean.
 * If a recovery was in flight, its interrupt-disable is balanced here.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* already off; just balance a recovery's irq disable */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	wl->band = IEEE80211_BAND_2GHZ;

	/* reset all per-device software state to its boot defaults */
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* fw_status_2 points into the fw_status_1 allocation */
	kfree(wl->fw_status_1);
	wl->fw_status_1 = NULL;
	wl->fw_status_2 = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * clear the last Reg-Domain channel configuration.
	 */
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
1956
1957 static void wlcore_op_stop(struct ieee80211_hw *hw)
1958 {
1959 struct wl1271 *wl = hw->priv;
1960
1961 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1962
1963 mutex_lock(&wl->mutex);
1964
1965 wlcore_op_stop_locked(wl);
1966
1967 mutex_unlock(&wl->mutex);
1968 }
1969
/*
 * Delayed-work handler that fires when a channel switch did not complete
 * in time. If the switch is still marked in progress, it reports failure
 * to mac80211 and tells the FW to stop the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	/* report the failed switch to mac80211 */
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* abort the switch on the FW side as well */
	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2006
2007 static void wlcore_connection_loss_work(struct work_struct *work)
2008 {
2009 struct delayed_work *dwork;
2010 struct wl1271 *wl;
2011 struct ieee80211_vif *vif;
2012 struct wl12xx_vif *wlvif;
2013
2014 dwork = container_of(work, struct delayed_work, work);
2015 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2016 wl = wlvif->wl;
2017
2018 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2019
2020 mutex_lock(&wl->mutex);
2021
2022 if (unlikely(wl->state != WLCORE_STATE_ON))
2023 goto out;
2024
2025 /* Call mac80211 connection loss */
2026 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2027 goto out;
2028
2029 vif = wl12xx_wlvif_to_vif(wlvif);
2030 ieee80211_connection_loss(vif);
2031 out:
2032 mutex_unlock(&wl->mutex);
2033 }
2034
/*
 * Delayed-work handler that cancels the remain-on-channel kept alive for
 * a pending authentication, once WLCORE_PEND_AUTH_ROC_TIMEOUT has really
 * elapsed since the last auth reply (a newer reply may have arrived while
 * this work waited on the mutex, in which case it bails out).
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
2075
2076 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2077 {
2078 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2079 WL12XX_MAX_RATE_POLICIES);
2080 if (policy >= WL12XX_MAX_RATE_POLICIES)
2081 return -EBUSY;
2082
2083 __set_bit(policy, wl->rate_policies_map);
2084 *idx = policy;
2085 return 0;
2086 }
2087
2088 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2089 {
2090 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2091 return;
2092
2093 __clear_bit(*idx, wl->rate_policies_map);
2094 *idx = WL12XX_MAX_RATE_POLICIES;
2095 }
2096
2097 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2098 {
2099 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2100 WLCORE_MAX_KLV_TEMPLATES);
2101 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2102 return -EBUSY;
2103
2104 __set_bit(policy, wl->klv_templates_map);
2105 *idx = policy;
2106 return 0;
2107 }
2108
2109 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2110 {
2111 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2112 return;
2113
2114 __clear_bit(*idx, wl->klv_templates_map);
2115 *idx = WLCORE_MAX_KLV_TEMPLATES;
2116 }
2117
2118 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2119 {
2120 switch (wlvif->bss_type) {
2121 case BSS_TYPE_AP_BSS:
2122 if (wlvif->p2p)
2123 return WL1271_ROLE_P2P_GO;
2124 else
2125 return WL1271_ROLE_AP;
2126
2127 case BSS_TYPE_STA_BSS:
2128 if (wlvif->p2p)
2129 return WL1271_ROLE_P2P_CL;
2130 else
2131 return WL1271_ROLE_STA;
2132
2133 case BSS_TYPE_IBSS:
2134 return WL1271_ROLE_IBSS;
2135
2136 default:
2137 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2138 }
2139 return WL12XX_INVALID_ROLE_TYPE;
2140 }
2141
/*
 * Initialize the driver-private vif data for a freshly added interface:
 * derive the BSS/role type from the mac80211 interface type, allocate
 * rate-policy (and, for STA/IBSS, keep-alive template) slots, copy the
 * global mac80211 settings into the per-vif fields and set up the vif's
 * deferred work items.
 *
 * Returns 0 on success or -EOPNOTSUPP for an unsupported interface type.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no FW role/link is assigned yet */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2235
2236 static int wl12xx_init_fw(struct wl1271 *wl)
2237 {
2238 int retries = WL1271_BOOT_RETRIES;
2239 bool booted = false;
2240 struct wiphy *wiphy = wl->hw->wiphy;
2241 int ret;
2242
2243 while (retries) {
2244 retries--;
2245 ret = wl12xx_chip_wakeup(wl, false);
2246 if (ret < 0)
2247 goto power_off;
2248
2249 ret = wl->ops->boot(wl);
2250 if (ret < 0)
2251 goto power_off;
2252
2253 ret = wl1271_hw_init(wl);
2254 if (ret < 0)
2255 goto irq_disable;
2256
2257 booted = true;
2258 break;
2259
2260 irq_disable:
2261 mutex_unlock(&wl->mutex);
2262 /* Unlocking the mutex in the middle of handling is
2263 inherently unsafe. In this case we deem it safe to do,
2264 because we need to let any possibly pending IRQ out of
2265 the system (and while we are WLCORE_STATE_OFF the IRQ
2266 work function will not do anything.) Also, any other
2267 possible concurrent operations will fail due to the
2268 current state, hence the wl1271 struct should be safe. */
2269 wlcore_disable_interrupts(wl);
2270 wl1271_flush_deferred_work(wl);
2271 cancel_work_sync(&wl->netstack_work);
2272 mutex_lock(&wl->mutex);
2273 power_off:
2274 wl1271_power_off(wl);
2275 }
2276
2277 if (!booted) {
2278 wl1271_error("firmware boot failed despite %d retries",
2279 WL1271_BOOT_RETRIES);
2280 goto out;
2281 }
2282
2283 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2284
2285 /* update hw/fw version info in wiphy struct */
2286 wiphy->hw_version = wl->chip.id;
2287 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2288 sizeof(wiphy->fw_version));
2289
2290 /*
2291 * Now we know if 11a is supported (info from the NVS), so disable
2292 * 11a channels if not supported
2293 */
2294 if (!wl->enable_11a)
2295 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2296
2297 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2298 wl->enable_11a ? "" : "not ");
2299
2300 wl->state = WLCORE_STATE_ON;
2301 out:
2302 return ret;
2303 }
2304
2305 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2306 {
2307 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2308 }
2309
2310 /*
2311 * Check whether a fw switch (i.e. moving from one loaded
2312 * fw to another) is needed. This function is also responsible
2313 * for updating wl->last_vif_count, so it must be called before
2314 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2315 * will be used).
2316 */
2317 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2318 struct vif_counter_data vif_counter_data,
2319 bool add)
2320 {
2321 enum wl12xx_fw_type current_fw = wl->fw_type;
2322 u8 vif_count = vif_counter_data.counter;
2323
2324 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2325 return false;
2326
2327 /* increase the vif count if this is a new vif */
2328 if (add && !vif_counter_data.cur_vif_running)
2329 vif_count++;
2330
2331 wl->last_vif_count = vif_count;
2332
2333 /* no need for fw change if the device is OFF */
2334 if (wl->state == WLCORE_STATE_OFF)
2335 return false;
2336
2337 /* no need for fw change if a single fw is used */
2338 if (!wl->mr_fw_name)
2339 return false;
2340
2341 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2342 return true;
2343 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2344 return true;
2345
2346 return false;
2347 }
2348
2349 /*
2350 * Enter "forced psm". Make sure the sta is in psm against the ap,
2351 * to make the fw switch a bit more disconnection-persistent.
2352 */
2353 static void wl12xx_force_active_psm(struct wl1271 *wl)
2354 {
2355 struct wl12xx_vif *wlvif;
2356
2357 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2358 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2359 }
2360 }
2361
/* scratch state passed to wlcore_hw_queue_iter() */
struct wlcore_hw_queue_iter_data {
	/* bitmap of hw-queue groups already taken by active interfaces */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2369
2370 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2371 struct ieee80211_vif *vif)
2372 {
2373 struct wlcore_hw_queue_iter_data *iter_data = data;
2374
2375 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2376 return;
2377
2378 if (iter_data->cur_running || vif == iter_data->vif) {
2379 iter_data->cur_running = true;
2380 return;
2381 }
2382
2383 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2384 }
2385
/*
 * Assign a hw-queue base (a group of NUM_TX_QUEUES queues) to a vif.
 *
 * If mac80211 already assigned queues to this vif (resume/recovery),
 * those are reused; otherwise the first free group is taken. The
 * indices past the regular groups are reserved as per-interface cab
 * (content-after-beacon) queues for AP vifs.
 *
 * Returns 0 on success, -EBUSY when all queue groups are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* fresh group: no stop reasons may linger from a prior user */
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2436
/*
 * mac80211 add_interface callback: set up fw state for a new vif.
 *
 * Boots the firmware if this is the first interface (driver still OFF),
 * allocates a hw-queue base, enables a fw role for the vif and applies
 * the per-vif init. If the new vif requires a different firmware
 * (single-role vs multi-role), an intended recovery is run
 * synchronously instead, and setup continues from the recovery path.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	if (wl->plt) {
		wl1271_error("Adding Interface not allowed while in PLT mode");
		return -EBUSY;
	}

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	/* leave ELP so the chip can accept the commands below */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}


	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		/* run the recovery synchronously, with the mutex released */
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	/* from here on the vif is visible to the rest of the driver */
	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2538
/*
 * Tear down a vif: idle any scan it owns, stop/disable its fw roles,
 * free its rate policies, hlids and templates, and cancel its per-vif
 * work items.
 *
 * Called with wl->mutex held. The mutex is dropped (and re-taken)
 * around the sync-cancel calls at the end -- NOTE(review): presumably
 * because those works take wl->mutex themselves; the unlock/lock
 * pattern suggests so, confirm against the work functions.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2675
/*
 * mac80211 remove_interface callback.
 *
 * Only tears the vif down if it is still present in wl->wlvif_list
 * (it may already be gone when a hardware recovery raced with us).
 * Afterwards, switch back to the single-role fw if this removal makes
 * a fw change necessary.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* warns if the vif was not found in the list at all */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2711
/*
 * mac80211 change_interface callback: implemented as remove + re-add.
 *
 * WL1271_FLAG_VIF_CHANGE_IN_PROGRESS suppresses the fw-switch logic in
 * wl12xx_need_fw_change() so that tearing down and re-adding the vif
 * does not trigger a spurious firmware change/recovery.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	/* mutate the vif in place, then bring it up as the new type */
	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
2729
/*
 * Issue the fw JOIN (role_start) for a station or IBSS vif.
 *
 * Side effect: the JOIN clears WPA/WPA2 keys in the chipset, so the
 * cached encryption type is reset here and re-learned when keys are
 * set again. Returns the role_start command result.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			/* return values deliberately ignored: dummy calls */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
2770
2771 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2772 int offset)
2773 {
2774 u8 ssid_len;
2775 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2776 skb->len - offset);
2777
2778 if (!ptr) {
2779 wl1271_error("No SSID in IEs!");
2780 return -ENOENT;
2781 }
2782
2783 ssid_len = ptr[1];
2784 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2785 wl1271_error("SSID is too long!");
2786 return -EINVAL;
2787 }
2788
2789 wlvif->ssid_len = ssid_len;
2790 memcpy(wlvif->ssid, ptr+2, ssid_len);
2791 return 0;
2792 }
2793
2794 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2795 {
2796 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2797 struct sk_buff *skb;
2798 int ieoffset;
2799
2800 /* we currently only support setting the ssid from the ap probe req */
2801 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2802 return -EINVAL;
2803
2804 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2805 if (!skb)
2806 return -EINVAL;
2807
2808 ieoffset = offsetof(struct ieee80211_mgmt,
2809 u.probe_req.variable);
2810 wl1271_ssid_set(wlvif, skb, ieoffset);
2811 dev_kfree_skb(skb);
2812
2813 return 0;
2814 }
2815
2816 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2817 struct ieee80211_bss_conf *bss_conf,
2818 u32 sta_rate_set)
2819 {
2820 int ieoffset;
2821 int ret;
2822
2823 wlvif->aid = bss_conf->aid;
2824 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2825 wlvif->beacon_int = bss_conf->beacon_int;
2826 wlvif->wmm_enabled = bss_conf->qos;
2827
2828 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2829
2830 /*
2831 * with wl1271, we don't need to update the
2832 * beacon_int and dtim_period, because the firmware
2833 * updates it by itself when the first beacon is
2834 * received after a join.
2835 */
2836 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2837 if (ret < 0)
2838 return ret;
2839
2840 /*
2841 * Get a template for hardware connection maintenance
2842 */
2843 dev_kfree_skb(wlvif->probereq);
2844 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2845 wlvif,
2846 NULL);
2847 ieoffset = offsetof(struct ieee80211_mgmt,
2848 u.probe_req.variable);
2849 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2850
2851 /* enable the connection monitoring feature */
2852 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2853 if (ret < 0)
2854 return ret;
2855
2856 /*
2857 * The join command disable the keep-alive mode, shut down its process,
2858 * and also clear the template config, so we need to reset it all after
2859 * the join. The acx_aid starts the keep-alive process, and the order
2860 * of the commands below is relevant.
2861 */
2862 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2863 if (ret < 0)
2864 return ret;
2865
2866 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2867 if (ret < 0)
2868 return ret;
2869
2870 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2871 if (ret < 0)
2872 return ret;
2873
2874 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2875 wlvif->sta.klv_template_id,
2876 ACX_KEEP_ALIVE_TPL_VALID);
2877 if (ret < 0)
2878 return ret;
2879
2880 /*
2881 * The default fw psm configuration is AUTO, while mac80211 default
2882 * setting is off (ACTIVE), so sync the fw with the correct value.
2883 */
2884 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2885 if (ret < 0)
2886 return ret;
2887
2888 if (sta_rate_set) {
2889 wlvif->rate_set =
2890 wl1271_tx_enabled_rates_get(wl,
2891 sta_rate_set,
2892 wlvif->band);
2893 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2894 if (ret < 0)
2895 return ret;
2896 }
2897
2898 return ret;
2899 }
2900
/*
 * Undo the association-time configuration of a vif: free the probe
 * request template, disable connection monitoring and keep-alive, stop
 * a pending channel switch, and invalidate the keep-alive template.
 *
 * NOTE(review): the function is declared int but returns 'false' (0)
 * on the early-out paths; also, the IBSS check bails out when
 * WLVIF_FLAG_IBSS_JOINED *was* set, while the STA check bails when
 * STA_ASSOCIATED was *not* set -- this asymmetry looks inverted.
 * Confirm intent before relying on the IBSS path.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* a channel switch was in flight: abort it and tell mac80211 */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2950
2951 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2952 {
2953 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2954 wlvif->rate_set = wlvif->basic_rate_set;
2955 }
2956
2957 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2958 bool idle)
2959 {
2960 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2961
2962 if (idle == cur_idle)
2963 return;
2964
2965 if (idle) {
2966 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2967 } else {
2968 /* The current firmware only supports sched_scan in idle */
2969 if (wl->sched_vif == wlvif)
2970 wl->ops->sched_scan_stop(wl, wlvif);
2971
2972 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2973 }
2974 }
2975
2976 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2977 struct ieee80211_conf *conf, u32 changed)
2978 {
2979 int ret;
2980
2981 if (conf->power_level != wlvif->power_level) {
2982 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2983 if (ret < 0)
2984 return ret;
2985
2986 wlvif->power_level = conf->power_level;
2987 }
2988
2989 return 0;
2990 }
2991
/*
 * mac80211 config callback: cache the global power level and push the
 * change to every vif. Wakes the chip from ELP for the fw access and
 * skips it entirely while the driver is not ON.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* cache the requested level even if the chip is off right now */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3033
/* multicast filter data handed from prepare_multicast to configure_filter */
struct wl1271_filter_params {
	bool enabled;		/* false when the mc list overflowed */
	int mc_list_length;	/* number of valid entries in mc_list */
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3039
3040 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3041 struct netdev_hw_addr_list *mc_list)
3042 {
3043 struct wl1271_filter_params *fp;
3044 struct netdev_hw_addr *ha;
3045
3046 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3047 if (!fp) {
3048 wl1271_error("Out of memory setting filters.");
3049 return 0;
3050 }
3051
3052 /* update multicast filtering parameters */
3053 fp->mc_list_length = 0;
3054 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3055 fp->enabled = false;
3056 } else {
3057 fp->enabled = true;
3058 netdev_hw_addr_list_for_each(ha, mc_list) {
3059 memcpy(fp->mc_list[fp->mc_list_length],
3060 ha->addr, ETH_ALEN);
3061 fp->mc_list_length++;
3062 }
3063 }
3064
3065 return (u64)(unsigned long)fp;
3066 }
3067
/* rx filter flags the driver can honor; anything else is masked out */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3074
/*
 * mac80211 configure_filter callback.
 *
 * Only the multicast group-address table is programmable here; the
 * remaining filters are derived by the fw from role/ROC state (see
 * comment below). @multicast is the cookie from prepare_multicast and
 * is freed on every exit path.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* the group-address table only applies to non-AP roles */
	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3129
3130 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3131 u8 id, u8 key_type, u8 key_size,
3132 const u8 *key, u8 hlid, u32 tx_seq_32,
3133 u16 tx_seq_16)
3134 {
3135 struct wl1271_ap_key *ap_key;
3136 int i;
3137
3138 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3139
3140 if (key_size > MAX_KEY_SIZE)
3141 return -EINVAL;
3142
3143 /*
3144 * Find next free entry in ap_keys. Also check we are not replacing
3145 * an existing key.
3146 */
3147 for (i = 0; i < MAX_NUM_KEYS; i++) {
3148 if (wlvif->ap.recorded_keys[i] == NULL)
3149 break;
3150
3151 if (wlvif->ap.recorded_keys[i]->id == id) {
3152 wl1271_warning("trying to record key replacement");
3153 return -EINVAL;
3154 }
3155 }
3156
3157 if (i == MAX_NUM_KEYS)
3158 return -EBUSY;
3159
3160 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3161 if (!ap_key)
3162 return -ENOMEM;
3163
3164 ap_key->id = id;
3165 ap_key->key_type = key_type;
3166 ap_key->key_size = key_size;
3167 memcpy(ap_key->key, key, key_size);
3168 ap_key->hlid = hlid;
3169 ap_key->tx_seq_32 = tx_seq_32;
3170 ap_key->tx_seq_16 = tx_seq_16;
3171
3172 wlvif->ap.recorded_keys[i] = ap_key;
3173 return 0;
3174 }
3175
3176 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3177 {
3178 int i;
3179
3180 for (i = 0; i < MAX_NUM_KEYS; i++) {
3181 kfree(wlvif->ap.recorded_keys[i]);
3182 wlvif->ap.recorded_keys[i] = NULL;
3183 }
3184 }
3185
/*
 * Program the keys recorded before AP start into the fw, and set the
 * default WEP key if any WEP key was among them.
 *
 * The recorded keys are freed on every path, success or failure.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* an invalid hlid falls back to the vif's broadcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3225
/*
 * Low-level key add/remove dispatcher.
 *
 * AP vifs: before the AP is started, keys are only recorded (replayed
 * by wl1271_ap_init_hwenc()); afterwards they go straight to the fw.
 * STA/IBSS vifs: unicast key removal is silently ignored (the fw
 * clears those on the next JOIN), as is removal after the sta hlid was
 * already freed.
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* per-station key, or the broadcast link for group keys */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3302
/*
 * mac80211 set_key callback wrapper.
 *
 * GEM and TKIP keys may change the fw "spare block" accounting, so for
 * those ciphers the tx queues are stopped and flushed first, keeping
 * queued packets in sync with the fw accounting; the queues are woken
 * again on every exit path.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	/* dispatch to the chip-family specific implementation */
	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3346
/*
 * Shared set_key implementation used by the chip-family hw_ops.
 *
 * Derives the fw key type from the cipher suite, computes the tx
 * sequence counters from the per-link freed-packets count, and adds or
 * removes the key via wl1271_set_key(). On a unicast (or WEP) key-type
 * change for a station vif the arp response template is rebuilt.
 *
 * Returns 0 on success or a negative error code.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/* resolve the link this key belongs to */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	/* seed the tx sequence counters from the link's freed-pkts count */
	if (hlid != WL12XX_INVALID_LINK_ID) {
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		/* the fw generates the CCMP IV; mac80211 leaves room for it */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				 key_conf->keyidx, key_type,
				 key_conf->keylen, key_conf->key,
				 tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
3453
3454 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3455 struct ieee80211_vif *vif,
3456 int key_idx)
3457 {
3458 struct wl1271 *wl = hw->priv;
3459 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3460 int ret;
3461
3462 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3463 key_idx);
3464
3465 mutex_lock(&wl->mutex);
3466
3467 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3468 ret = -EAGAIN;
3469 goto out_unlock;
3470 }
3471
3472 ret = wl1271_ps_elp_wakeup(wl);
3473 if (ret < 0)
3474 goto out_unlock;
3475
3476 wlvif->default_key = key_idx;
3477
3478 /* the default WEP key needs to be configured at least once */
3479 if (wlvif->encryption_type == KEY_WEP) {
3480 ret = wl12xx_cmd_set_default_wep_key(wl,
3481 key_idx,
3482 wlvif->sta.hlid);
3483 if (ret < 0)
3484 goto out_sleep;
3485 }
3486
3487 out_sleep:
3488 wl1271_ps_elp_sleep(wl);
3489
3490 out_unlock:
3491 mutex_unlock(&wl->mutex);
3492 }
3493
/*
 * Push the current regulatory-domain channel configuration to the
 * firmware. No-op unless the hardware declared the
 * WLCORE_QUIRK_REGDOMAIN_CONF quirk.
 *
 * On command failure a full recovery is queued and the ELP sleep call
 * is skipped - presumably because recovery re-initializes the chip's
 * power state anyway (NOTE(review): confirm against recovery path).
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* chip must be awake before issuing the firmware command */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3520
/*
 * mac80211 hw_scan callback: start a one-shot hardware scan.
 *
 * Only the first requested SSID (if any) is passed down to the scan
 * machinery. Fails with -EBUSY while any role is in remain-on-channel,
 * and with -EAGAIN when the core is not fully up (see comment below on
 * why -EBUSY cannot be used in that case).
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3568
/*
 * mac80211 cancel_hw_scan callback: abort a scan in progress.
 *
 * Stops the firmware scan (unless it already reported DONE), resets the
 * driver's scan bookkeeping and tells mac80211 the scan was aborted.
 * The completion work is cancelled outside the mutex since it takes the
 * same lock.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	/* second argument 'true' = scan was aborted */
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3615
/*
 * mac80211 sched_scan_start callback: start a firmware-driven
 * scheduled (periodic) scan and remember which vif owns it.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_sched_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* only one vif may own the scheduled scan at a time */
	wl->sched_vif = wlvif;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
3650
/*
 * mac80211 sched_scan_stop callback: stop the firmware scheduled scan.
 * Best-effort: the lower-level stop's return value is not checked (the
 * callback itself is void).
 */
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl->ops->sched_scan_stop(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3675
/*
 * mac80211 set_frag_threshold callback: push the new fragmentation
 * threshold to the firmware. A failed ACX command is only warned
 * about; the error code is still propagated to mac80211.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3703
/*
 * mac80211 set_rts_threshold callback: apply the new RTS threshold to
 * every active vif. Best-effort per vif - a failure is warned about
 * and the loop continues; the value of the last iteration is returned.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3733
3734 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3735 {
3736 int len;
3737 const u8 *next, *end = skb->data + skb->len;
3738 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3739 skb->len - ieoffset);
3740 if (!ie)
3741 return;
3742 len = ie[1] + 2;
3743 next = ie + len;
3744 memmove(ie, next, end - next);
3745 skb_trim(skb, skb->len - len);
3746 }
3747
3748 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3749 unsigned int oui, u8 oui_type,
3750 int ieoffset)
3751 {
3752 int len;
3753 const u8 *next, *end = skb->data + skb->len;
3754 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3755 skb->data + ieoffset,
3756 skb->len - ieoffset);
3757 if (!ie)
3758 return;
3759 len = ie[1] + 2;
3760 next = ie + len;
3761 memmove(ie, next, end - next);
3762 skb_trim(skb, skb->len - len);
3763 }
3764
3765 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3766 struct ieee80211_vif *vif)
3767 {
3768 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3769 struct sk_buff *skb;
3770 int ret;
3771
3772 skb = ieee80211_proberesp_get(wl->hw, vif);
3773 if (!skb)
3774 return -EOPNOTSUPP;
3775
3776 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3777 CMD_TEMPL_AP_PROBE_RESPONSE,
3778 skb->data,
3779 skb->len, 0,
3780 rates);
3781 dev_kfree_skb(skb);
3782
3783 if (ret < 0)
3784 goto out;
3785
3786 wl1271_debug(DEBUG_AP, "probe response updated");
3787 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3788
3789 out:
3790 return ret;
3791 }
3792
/*
 * Push a probe-response template for hardware that needs the SSID
 * patched in by the driver. When the vif already tracks an SSID the
 * frame is sent as-is; otherwise (presumably the hidden-SSID case -
 * NOTE(review): confirm) the SSID IE inside @probe_rsp_data is replaced
 * with the SSID from @vif's bss_conf before the template is set.
 *
 * Returns the template-set command result, or -EINVAL when the patched
 * frame would overflow the template buffer or no SSID IE is found.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	/* remember where the SSID IE starts, then skip past it */
	ssid_ie_offset = ptr - probe_rsp_data;
	ptr += (ptr[1] + 2);

	/* copy everything up to (but not including) the original SSID IE */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the rest of the original frame after the new SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3850
/*
 * Apply ERP-related bss changes (slot time, preamble, CTS protection)
 * to the firmware. Returns 0 or the first failing ACX command's error.
 *
 * NOTE(review): the preamble branch ignores wl1271_acx_set_preamble()'s
 * return value, unlike the slot/CTS branches - looks intentional
 * (best-effort), but worth confirming.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
3893
3894 static int wlcore_set_beacon_template(struct wl1271 *wl,
3895 struct ieee80211_vif *vif,
3896 bool is_ap)
3897 {
3898 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3899 struct ieee80211_hdr *hdr;
3900 u32 min_rate;
3901 int ret;
3902 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3903 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3904 u16 tmpl_id;
3905
3906 if (!beacon) {
3907 ret = -EINVAL;
3908 goto out;
3909 }
3910
3911 wl1271_debug(DEBUG_MASTER, "beacon updated");
3912
3913 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3914 if (ret < 0) {
3915 dev_kfree_skb(beacon);
3916 goto out;
3917 }
3918 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3919 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3920 CMD_TEMPL_BEACON;
3921 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3922 beacon->data,
3923 beacon->len, 0,
3924 min_rate);
3925 if (ret < 0) {
3926 dev_kfree_skb(beacon);
3927 goto out;
3928 }
3929
3930 wlvif->wmm_enabled =
3931 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3932 WLAN_OUI_TYPE_MICROSOFT_WMM,
3933 beacon->data + ieoffset,
3934 beacon->len - ieoffset);
3935
3936 /*
3937 * In case we already have a probe-resp beacon set explicitly
3938 * by usermode, don't use the beacon data.
3939 */
3940 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3941 goto end_bcn;
3942
3943 /* remove TIM ie from probe response */
3944 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3945
3946 /*
3947 * remove p2p ie from probe response.
3948 * the fw reponds to probe requests that don't include
3949 * the p2p ie. probe requests with p2p ie will be passed,
3950 * and will be responded by the supplicant (the spec
3951 * forbids including the p2p ie when responding to probe
3952 * requests that didn't include it).
3953 */
3954 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3955 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3956
3957 hdr = (struct ieee80211_hdr *) beacon->data;
3958 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3959 IEEE80211_STYPE_PROBE_RESP);
3960 if (is_ap)
3961 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3962 beacon->data,
3963 beacon->len,
3964 min_rate);
3965 else
3966 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3967 CMD_TEMPL_PROBE_RESPONSE,
3968 beacon->data,
3969 beacon->len, 0,
3970 min_rate);
3971 end_bcn:
3972 dev_kfree_skb(beacon);
3973 if (ret < 0)
3974 goto out;
3975
3976 out:
3977 return ret;
3978 }
3979
/*
 * Handle beacon-related bss changes: beacon interval, an explicit
 * probe response (AP only), and the beacon frame itself.
 *
 * NOTE(review): the probe-response template call's return value is
 * discarded - looks like a deliberate best-effort update; confirm.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4013
/* AP mode changes */
/*
 * Handle bss_info changes for an AP vif: basic-rate updates (which
 * cascade into rate policies and all templates), beacon changes,
 * starting/stopping the AP role, ERP parameters and HT information.
 * Caller holds wl->mutex with the chip awake; errors abort the
 * remaining steps but cannot be reported (void mac80211 path).
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								    wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		/* new basic rates invalidate policies and all templates */
		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4107
/*
 * A new BSSID was set on a STA vif: record beacon interval and rates,
 * stop any scheduled scan owned by this vif, refresh rate policies and
 * the null-data templates, and mark the vif as in use. Called with
 * wl->mutex held. Returns 0 or the first failing command's error.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* AP's supported rates, if known, constrain our TX rate set */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4158
4159 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4160 {
4161 int ret;
4162
4163 /* revert back to minimum rates for the current band */
4164 wl1271_set_band_rate(wl, wlvif);
4165 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4166
4167 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4168 if (ret < 0)
4169 return ret;
4170
4171 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4172 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4173 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4174 if (ret < 0)
4175 return ret;
4176 }
4177
4178 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4179 return 0;
4180 }
4181 /* STA/IBSS mode changes */
/*
 * Handle bss_info changes for a STA or IBSS vif: IBSS join/leave,
 * beacon parameters, CQM thresholds, BSSID set/clear (with the join
 * that follows), association state, powersave, HT capabilities of the
 * peer and ARP filtering. The ordering is significant - e.g. HT and
 * ARP configuration must happen after the join - so steps are applied
 * strictly in sequence and the first failure aborts the rest.
 * Caller holds wl->mutex with the chip awake; the function is void so
 * errors cannot propagate to mac80211.
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			/* left the IBSS: tear down assoc state and role */
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		/* a zero threshold means CQM reporting is disabled */
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		/* snapshot the AP's rates/HT caps under RCU for later use */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			/* copied by value so it stays valid outside RCU */
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* re-apply authorization if it preceded assoc */
			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4422
/*
 * mac80211 bss_info_changed callback: take the mutex, wake the chip
 * and dispatch to the AP- or STA-specific handler. Work that must not
 * run under wl->mutex (cancelling the connection-loss work, flushing
 * TX before beacons stop) is done before the lock is taken.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	/* drain TX before beaconing stops so no frames are stranded */
	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4469
4470 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4471 struct ieee80211_chanctx_conf *ctx)
4472 {
4473 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4474 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4475 cfg80211_get_chandef_type(&ctx->def));
4476 return 0;
4477 }
4478
4479 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4480 struct ieee80211_chanctx_conf *ctx)
4481 {
4482 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4483 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4484 cfg80211_get_chandef_type(&ctx->def));
4485 }
4486
4487 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4488 struct ieee80211_chanctx_conf *ctx,
4489 u32 changed)
4490 {
4491 wl1271_debug(DEBUG_MAC80211,
4492 "mac80211 change chanctx %d (type %d) changed 0x%x",
4493 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4494 cfg80211_get_chandef_type(&ctx->def), changed);
4495 }
4496
4497 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4498 struct ieee80211_vif *vif,
4499 struct ieee80211_chanctx_conf *ctx)
4500 {
4501 struct wl1271 *wl = hw->priv;
4502 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4503 int channel = ieee80211_frequency_to_channel(
4504 ctx->def.chan->center_freq);
4505
4506 wl1271_debug(DEBUG_MAC80211,
4507 "mac80211 assign chanctx (role %d) %d (type %d)",
4508 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4509
4510 mutex_lock(&wl->mutex);
4511
4512 wlvif->band = ctx->def.chan->band;
4513 wlvif->channel = channel;
4514 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4515
4516 /* update default rates according to the band */
4517 wl1271_set_band_rate(wl, wlvif);
4518
4519 mutex_unlock(&wl->mutex);
4520
4521 return 0;
4522 }
4523
4524 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4525 struct ieee80211_vif *vif,
4526 struct ieee80211_chanctx_conf *ctx)
4527 {
4528 struct wl1271 *wl = hw->priv;
4529 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4530
4531 wl1271_debug(DEBUG_MAC80211,
4532 "mac80211 unassign chanctx (role %d) %d (type %d)",
4533 wlvif->role_id,
4534 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4535 cfg80211_get_chandef_type(&ctx->def));
4536
4537 wl1271_tx_flush(wl);
4538 }
4539
/*
 * mac80211 conf_tx callback: program EDCA parameters (AC config) and
 * the TID configuration for one queue. U-APSD requests select the
 * trigger-based powersave scheme; otherwise legacy PS is used.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	/* nothing to program before the vif is initialized */
	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4589
/*
 * mac80211 op: read the current TSF (MAC time) from the FW.
 *
 * Returns ULLONG_MAX when the chip is not on or the ACX read fails.
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{

	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
4621
4622 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4623 struct survey_info *survey)
4624 {
4625 struct ieee80211_conf *conf = &hw->conf;
4626
4627 if (idx != 0)
4628 return -ENOENT;
4629
4630 survey->channel = conf->chandef.chan;
4631 survey->filled = 0;
4632 return 0;
4633 }
4634
/*
 * Allocate an HLID (host link ID) for a new AP-mode station.
 *
 * Restores the station's previous security sequence counter (saved in
 * the per-sta drv_priv) so PN continuity survives recovery/resume.
 * Returns 0 on success, -EBUSY if no station or link slot is free.
 */
static int wl1271_allocate_sta(struct wl1271 *wl,
			       struct wl12xx_vif *wlvif,
			       struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;


	if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too much stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	/* use the previous security seq, if this is a recovery/resume */
	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}
4663
/*
 * Release an AP-mode station's HLID and its bookkeeping.
 *
 * Saves the last-used security sequence counter back into the per-sta
 * drv_priv (under RCU, since the sta may already be unlinked), and pads
 * it during recovery to cover frames whose completion was never seen.
 * Safe to call for an already-freed hlid (early return).
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct wl1271_station *wl_sta;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of iee80211_sta,
	 * in case of recovery/suspend
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (sta) {
		wl_sta = (void *)sta->drv_priv;
		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

		/*
		 * increment the initial seq number on recovery to account for
		 * transmitted packets that we haven't yet got in the FW status
		 */
		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
			wl_sta->total_freed_pkts +=
					WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
	rcu_read_unlock();

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4707
4708 static int wl12xx_sta_add(struct wl1271 *wl,
4709 struct wl12xx_vif *wlvif,
4710 struct ieee80211_sta *sta)
4711 {
4712 struct wl1271_station *wl_sta;
4713 int ret = 0;
4714 u8 hlid;
4715
4716 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4717
4718 ret = wl1271_allocate_sta(wl, wlvif, sta);
4719 if (ret < 0)
4720 return ret;
4721
4722 wl_sta = (struct wl1271_station *)sta->drv_priv;
4723 hlid = wl_sta->hlid;
4724
4725 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4726 if (ret < 0)
4727 wl1271_free_sta(wl, wlvif, hlid);
4728
4729 return ret;
4730 }
4731
4732 static int wl12xx_sta_remove(struct wl1271 *wl,
4733 struct wl12xx_vif *wlvif,
4734 struct ieee80211_sta *sta)
4735 {
4736 struct wl1271_station *wl_sta;
4737 int ret = 0, id;
4738
4739 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4740
4741 wl_sta = (struct wl1271_station *)sta->drv_priv;
4742 id = wl_sta->hlid;
4743 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4744 return -EINVAL;
4745
4746 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4747 if (ret < 0)
4748 return ret;
4749
4750 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4751 return ret;
4752 }
4753
/*
 * Start a ROC (remain-on-channel) on this vif's role, but only when no
 * role currently has an active ROC — only one is kept active at a time.
 */
static void wlcore_roc_if_possible(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif)
{
	/* bail out if any role already holds a ROC */
	if (find_first_bit(wl->roc_map,
			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
		return;

	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
		return;

	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
4766
4767 /*
4768 * when wl_sta is NULL, we treat this call as if coming from a
4769 * pending auth reply.
4770 * wl->mutex must be taken and the FW must be awake when the call
4771 * takes place.
4772 */
4773 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4774 struct wl1271_station *wl_sta, bool in_conn)
4775 {
4776 if (in_conn) {
4777 if (WARN_ON(wl_sta && wl_sta->in_connection))
4778 return;
4779
4780 if (!wlvif->ap_pending_auth_reply &&
4781 !wlvif->inconn_count)
4782 wlcore_roc_if_possible(wl, wlvif);
4783
4784 if (wl_sta) {
4785 wl_sta->in_connection = true;
4786 wlvif->inconn_count++;
4787 } else {
4788 wlvif->ap_pending_auth_reply = true;
4789 }
4790 } else {
4791 if (wl_sta && !wl_sta->in_connection)
4792 return;
4793
4794 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4795 return;
4796
4797 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4798 return;
4799
4800 if (wl_sta) {
4801 wl_sta->in_connection = false;
4802 wlvif->inconn_count--;
4803 } else {
4804 wlvif->ap_pending_auth_reply = false;
4805 }
4806
4807 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4808 test_bit(wlvif->role_id, wl->roc_map))
4809 wl12xx_croc(wl, wlvif->role_id);
4810 }
4811 }
4812
/*
 * Apply a mac80211 station state transition to the FW.
 *
 * Handles add/remove/authorize for AP mode and the authorize/ROC
 * housekeeping for STA mode. Caller holds wl->mutex with the FW awake
 * (see wl12xx_op_sta_state). Returns 0 or a negative error.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* the station finished connecting - release the ROC */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* de-authorize on AUTHORIZED -> ASSOC downgrade (STA mode) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* start a ROC while connecting, if no other role holds one */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
4898
/*
 * mac80211 op: station state transition entry point.
 *
 * Wakes the FW, delegates to wl12xx_update_sta_state() and goes back
 * to sleep. State *downgrades* always report success (see note below).
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/*
	 * NOTE(review): downgrades are forced to succeed — presumably
	 * mac80211 cannot roll back a failed teardown transition; confirm.
	 */
	if (new_state < old_state)
		return 0;
	return ret;
}
4932
/*
 * mac80211 op: handle AMPDU (block-ack) session requests.
 *
 * Only RX BA sessions are managed here, tracked per-link in
 * wl->links[hlid].ba_bitmap; TX sessions are rejected (see below).
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the FW link (hlid) the BA session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * BA initiator (TX) sessions are managed by the FW independently,
	 * so reject all TX AMPDU commands; intentional fall-through.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5052
/*
 * mac80211 op: restrict the set of legacy rates used for TX.
 *
 * The per-band masks are always cached in the vif; the FW rate policy
 * is only reprogrammed for a not-yet-associated STA vif, since rates
 * are renegotiated on association anyway.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* cache the mask for every band; used later by set_band_rate */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5095
/*
 * mac80211 op: perform a CSA channel switch on all STA vifs.
 *
 * If the chip is off, report the switch as failed to mac80211 for each
 * STA vif. Otherwise each vif is switched via the chip-specific op and
 * a watchdog work is armed to flag failure if the switch never
 * completes.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5147
/*
 * mac80211 op: flush TX. The queues bitmap and drop flag are ignored —
 * a full flush of all queues is always performed.
 */
static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct wl1271 *wl = hw->priv;

	wl1271_tx_flush(wl);
}
5154
/*
 * mac80211 op: remain-on-channel request.
 *
 * Only one ROC may be active at a time (tracked via wl->roc_vif and
 * wl->roc_map); a delayed work completes the ROC after "duration" ms.
 * The roc type is not used by this hardware.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5200
/*
 * Tear down the active ROC, if any.
 *
 * Caller holds wl->mutex with the FW awake. Returns 0 when there was
 * no ROC or it was stopped, -EBUSY while the vif is not yet
 * initialized, or the stop_dev error.
 */
static int __wlcore_roc_completed(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	int ret;

	/* already completed */
	if (unlikely(!wl->roc_vif))
		return 0;

	wlvif = wl12xx_vif_to_data(wl->roc_vif);

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return -EBUSY;

	ret = wl12xx_stop_dev(wl, wlvif);
	if (ret < 0)
		return ret;

	wl->roc_vif = NULL;

	return 0;
}
5223
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the FW, completes the ROC and puts the FW back to sleep.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5249
5250 static void wlcore_roc_complete_work(struct work_struct *work)
5251 {
5252 struct delayed_work *dwork;
5253 struct wl1271 *wl;
5254 int ret;
5255
5256 dwork = container_of(work, struct delayed_work, work);
5257 wl = container_of(dwork, struct wl1271, roc_complete_work);
5258
5259 ret = wlcore_roc_completed(wl);
5260 if (!ret)
5261 ieee80211_remain_on_channel_expired(wl->hw);
5262 }
5263
/*
 * mac80211 op: cancel an in-progress remain-on-channel.
 *
 * Cancels the pending completion work and completes the ROC
 * synchronously.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5282
5283 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5284 struct ieee80211_vif *vif,
5285 struct ieee80211_sta *sta,
5286 u32 changed)
5287 {
5288 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5289 struct wl1271 *wl = hw->priv;
5290
5291 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5292 }
5293
5294 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5295 struct ieee80211_vif *vif,
5296 struct ieee80211_sta *sta,
5297 s8 *rssi_dbm)
5298 {
5299 struct wl1271 *wl = hw->priv;
5300 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5301 int ret = 0;
5302
5303 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5304
5305 mutex_lock(&wl->mutex);
5306
5307 if (unlikely(wl->state != WLCORE_STATE_ON))
5308 goto out;
5309
5310 ret = wl1271_ps_elp_wakeup(wl);
5311 if (ret < 0)
5312 goto out_sleep;
5313
5314 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5315 if (ret < 0)
5316 goto out_sleep;
5317
5318 out_sleep:
5319 wl1271_ps_elp_sleep(wl);
5320
5321 out:
5322 mutex_unlock(&wl->mutex);
5323
5324 return ret;
5325 }
5326
5327 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5328 {
5329 struct wl1271 *wl = hw->priv;
5330 bool ret = false;
5331
5332 mutex_lock(&wl->mutex);
5333
5334 if (unlikely(wl->state != WLCORE_STATE_ON))
5335 goto out;
5336
5337 /* packets are considered pending if in the TX queue or the FW */
5338 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5339 out:
5340 mutex_unlock(&wl->mutex);
5341
5342 return ret;
5343 }
5344
/* can't be const, mac80211 writes to this */
/* 2.4GHz legacy rate table; .bitrate is in units of 100 kbps */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5387
/* can't be const, mac80211 writes to this */
/* 2.4GHz band: channels 1-14 */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5405
/* can't be const, mac80211 writes to this */
/* 2.4GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5413
/* 5 GHz data rates for WL1273 */
/* OFDM only (no CCK on 5GHz); .bitrate is in units of 100 kbps */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5441
/* 5 GHz band channels for WL1273 */
/* all channels advertised at WLCORE_MAX_TXPWR */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5476
/* 5GHz band descriptor registered with mac80211 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5483
/* mac80211 callback table for all wlcore-based chips */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5527
5528
5529 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5530 {
5531 u8 idx;
5532
5533 BUG_ON(band >= 2);
5534
5535 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5536 wl1271_error("Illegal RX rate from HW: %d", rate);
5537 return 0;
5538 }
5539
5540 idx = wl->band_rate_to_idx[band][rate];
5541 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5542 wl1271_error("Unsupported RX rate from HW: %d", rate);
5543 return 0;
5544 }
5545
5546 return idx;
5547 }
5548
/*
 * Build the wiphy MAC address list from a base OUI/NIC pair.
 *
 * The first wl->num_mac_addr addresses increment the NIC part; if the
 * chip provides fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot is
 * filled by reusing the first address with the locally-administered
 * (LAA) bit set.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[2] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
5587
/*
 * Power the chip on briefly to read its identity: chip id, PG version
 * and (if the chip supports it) the fused MAC address. The chip is
 * powered off again before returning.
 */
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
	if (ret < 0)
		goto out;

	/* cleared here; get_mac below may fill them in from fuse */
	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
	if (ret < 0)
		goto out;

	if (wl->ops->get_mac)
		ret = wl->ops->get_mac(wl);

out:
	wl1271_power_off(wl);
	return ret;
}
5614
/*
 * Register the hw with mac80211 (idempotent).
 *
 * The base MAC address is taken from the NVS; if the NVS address is
 * all-zero, it is derived from the chip fuse instead. Also sets up
 * debugfs after successful registration.
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
5660
/*
 * Unregister from mac80211.  If PLT (production line testing) mode is
 * still active, stop it first so the chip is shut down cleanly.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
5670
/*
 * Interface concurrency limits advertised to cfg80211: up to three
 * station interfaces, plus at most one AP-like (AP/P2P-GO) or
 * P2P-client interface.
 */
static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			 BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};
5683
/*
 * Allowed interface combination (max. three interfaces total).
 * Deliberately not const: num_different_channels is filled in at init
 * time from the chip-specific wl->num_channels (see
 * wl1271_init_ieee80211()).
 */
static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
	{
		.max_interfaces = 3,
		.limits = wlcore_iface_limits,
		.n_limits = ARRAY_SIZE(wlcore_iface_limits),
	},
};
5692
/*
 * One-time setup of the ieee80211_hw / wiphy capabilities before
 * registration: hardware flags, cipher suites, supported interface
 * modes, scan limits, per-device band data and the Tx queue layout.
 *
 * Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,	/* TI-specific cipher suite */
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* some chips need extra room in the skb for the TKIP header */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->channel_change_time = 10000;
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_SUPPORTS_UAPSD |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_QUEUE_CONTROL;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 5000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
	wl->hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(wlcore_iface_combinations);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5828
/*
 * Allocate and initialize the common wlcore state.
 *
 * @priv_size:     size of the lower (wl12xx/wl18xx) driver's private
 *                 area, allocated as wl->priv
 * @aggr_buf_size: size of the Tx/Rx aggregation buffer
 * @mbox_size:     size of the event mailbox buffer
 *
 * Returns the new ieee80211_hw on success or an ERR_PTR() on failure.
 * The caller eventually releases everything with wlcore_free_hw().
 * Note the error-unwind labels free resources in reverse order of
 * allocation; keep them in sync when adding allocations.
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/* per-link, per-AC Tx queues */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WL12XX_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	/* freezable so the works are quiesced across suspend/resume */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	/* defaults for runtime state */
	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	/* whole pages for the aggregation buffer */
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	/* GFP_DMA: the mailbox may be read via DMA by the bus driver */
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	/* debugfs entries were not created yet; exit is a no-op here */
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5979
/*
 * Counterpart of wlcore_alloc_hw(): release all common wlcore state,
 * including resources that may have been set up later (firmware image,
 * NVS data, fw status and Tx result buffers, sysfs and debugfs).
 * The caller must already have unregistered from mac80211 and freed
 * the interrupt.  Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware was vmalloc'ed (see the fw request path) */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->fw_status_1);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
6014
#ifdef CONFIG_PM
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps the chip powered during suspend (pdata->pwr_in_suspend).
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6023
/*
 * Hard-irq handler used on edge-triggered platforms: do no work in
 * hard irq context, just wake the threaded handler (wlcore_irq) so
 * that no interrupt edge is missed.
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6028
6029 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6030 {
6031 struct wl1271 *wl = context;
6032 struct platform_device *pdev = wl->pdev;
6033 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6034 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6035 unsigned long irqflags;
6036 int ret;
6037 irq_handler_t hardirq_fn = NULL;
6038
6039 if (fw) {
6040 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6041 if (!wl->nvs) {
6042 wl1271_error("Could not allocate nvs data");
6043 goto out;
6044 }
6045 wl->nvs_len = fw->size;
6046 } else {
6047 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6048 WL12XX_NVS_NAME);
6049 wl->nvs = NULL;
6050 wl->nvs_len = 0;
6051 }
6052
6053 ret = wl->ops->setup(wl);
6054 if (ret < 0)
6055 goto out_free_nvs;
6056
6057 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6058
6059 /* adjust some runtime configuration parameters */
6060 wlcore_adjust_conf(wl);
6061
6062 wl->irq = platform_get_irq(pdev, 0);
6063 wl->platform_quirks = pdata->platform_quirks;
6064 wl->if_ops = pdev_data->if_ops;
6065
6066 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6067 irqflags = IRQF_TRIGGER_RISING;
6068 hardirq_fn = wlcore_hardirq;
6069 } else {
6070 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6071 }
6072
6073 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6074 irqflags, pdev->name, wl);
6075 if (ret < 0) {
6076 wl1271_error("request_irq() failed: %d", ret);
6077 goto out_free_nvs;
6078 }
6079
6080 #ifdef CONFIG_PM
6081 ret = enable_irq_wake(wl->irq);
6082 if (!ret) {
6083 wl->irq_wake_enabled = true;
6084 device_init_wakeup(wl->dev, 1);
6085 if (pdata->pwr_in_suspend)
6086 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6087 }
6088 #endif
6089 disable_irq(wl->irq);
6090
6091 ret = wl12xx_get_hw_info(wl);
6092 if (ret < 0) {
6093 wl1271_error("couldn't get hw info");
6094 goto out_irq;
6095 }
6096
6097 ret = wl->ops->identify_chip(wl);
6098 if (ret < 0)
6099 goto out_irq;
6100
6101 ret = wl1271_init_ieee80211(wl);
6102 if (ret)
6103 goto out_irq;
6104
6105 ret = wl1271_register_hw(wl);
6106 if (ret)
6107 goto out_irq;
6108
6109 ret = wlcore_sysfs_init(wl);
6110 if (ret)
6111 goto out_unreg;
6112
6113 wl->initialized = true;
6114 goto out;
6115
6116 out_unreg:
6117 wl1271_unregister_hw(wl);
6118
6119 out_irq:
6120 free_irq(wl->irq, wl);
6121
6122 out_free_nvs:
6123 kfree(wl->nvs);
6124
6125 out:
6126 release_firmware(fw);
6127 complete_all(&wl->nvs_loading_complete);
6128 }
6129
/*
 * Common probe entry point for the lower wl12xx/wl18xx drivers.
 *
 * Kicks off an asynchronous request for the (optional) NVS calibration
 * file; initialization continues in wlcore_nvs_cb() once the request
 * completes, whether or not a file was found.
 *
 * Returns 0 if the request was queued, or a negative error code.
 */
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	int ret;

	if (!wl->ops || !wl->ptable)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
				      wl, wlcore_nvs_cb);
	if (ret < 0) {
		wl1271_error("request_firmware_nowait failed: %d", ret);
		/* unblock wlcore_remove(), which waits on this completion */
		complete_all(&wl->nvs_loading_complete);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
6152
/*
 * Common remove entry point for the lower drivers.  Waits for the
 * asynchronous init path (wlcore_nvs_cb) to finish before tearing
 * anything down; if initialization never completed there is nothing
 * to undo.  Always returns 0.
 */
int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
6172
/* Runtime-adjustable debug bitmask; exported for the lower drivers */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);
This page took 0.24269 seconds and 5 git commands to generate.