3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
44 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters; consumed once by wlcore_adjust_conf().
 * -1 (or NULL for fwlog_param) means "keep the platform conf default".
 */
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;
51 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
52 struct ieee80211_vif
*vif
,
53 bool reset_tx_queues
);
54 static void wlcore_op_stop_locked(struct wl1271
*wl
);
55 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
57 static int wl12xx_set_authorized(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
61 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
70 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
74 wl1271_info("Association completed.");
78 static void wl1271_reg_notify(struct wiphy
*wiphy
,
79 struct regulatory_request
*request
)
81 struct ieee80211_supported_band
*band
;
82 struct ieee80211_channel
*ch
;
84 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
85 struct wl1271
*wl
= hw
->priv
;
87 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
88 for (i
= 0; i
< band
->n_channels
; i
++) {
89 ch
= &band
->channels
[i
];
90 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
93 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
94 ch
->flags
|= IEEE80211_CHAN_NO_IR
;
98 wlcore_regdomain_config(wl
);
101 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
106 /* we should hold wl->mutex */
107 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
123 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
126 int period
= wl
->conf
.rx_streaming
.interval
;
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
132 /* reconfigure/disable according to new streaming_period */
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
135 (wl
->conf
.rx_streaming
.always
||
136 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
137 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
139 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif
->rx_streaming_timer
);
147 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
150 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
151 rx_streaming_enable_work
);
152 struct wl1271
*wl
= wlvif
->wl
;
154 mutex_lock(&wl
->mutex
);
156 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
157 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
158 (!wl
->conf
.rx_streaming
.always
&&
159 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
162 if (!wl
->conf
.rx_streaming
.interval
)
165 ret
= wl1271_ps_elp_wakeup(wl
);
169 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
173 /* stop it after some time of inactivity */
174 mod_timer(&wlvif
->rx_streaming_timer
,
175 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
178 wl1271_ps_elp_sleep(wl
);
180 mutex_unlock(&wl
->mutex
);
183 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
186 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
187 rx_streaming_disable_work
);
188 struct wl1271
*wl
= wlvif
->wl
;
190 mutex_lock(&wl
->mutex
);
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
195 ret
= wl1271_ps_elp_wakeup(wl
);
199 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
204 wl1271_ps_elp_sleep(wl
);
206 mutex_unlock(&wl
->mutex
);
209 static void wl1271_rx_streaming_timer(unsigned long data
)
211 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
212 struct wl1271
*wl
= wlvif
->wl
;
213 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl
->tx_allocated_blocks
== 0)
223 cancel_delayed_work(&wl
->tx_watchdog_work
);
224 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
225 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
228 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
230 struct delayed_work
*dwork
;
233 dwork
= container_of(work
, struct delayed_work
, work
);
234 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
236 mutex_lock(&wl
->mutex
);
238 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
241 /* Tx went out in the meantime - everything is ok */
242 if (unlikely(wl
->tx_allocated_blocks
== 0))
246 * if a ROC is in progress, we might not have any Tx for a long
247 * time (e.g. pending Tx on the non-ROC channels)
249 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
250 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
251 wl
->conf
.tx
.tx_watchdog_timeout
);
252 wl12xx_rearm_tx_watchdog_locked(wl
);
257 * if a scan is in progress, we might not have any Tx for a long
260 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
261 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
262 wl
->conf
.tx
.tx_watchdog_timeout
);
263 wl12xx_rearm_tx_watchdog_locked(wl
);
268 * AP might cache a frame for a long time for a sleeping station,
269 * so rearm the timer if there's an AP interface with stations. If
270 * Tx is genuinely stuck we will most hopefully discover it when all
271 * stations are removed due to inactivity.
273 if (wl
->active_sta_count
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
276 wl
->conf
.tx
.tx_watchdog_timeout
,
277 wl
->active_sta_count
);
278 wl12xx_rearm_tx_watchdog_locked(wl
);
282 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 wl
->conf
.tx
.tx_watchdog_timeout
);
284 wl12xx_queue_recovery_work(wl
);
287 mutex_unlock(&wl
->mutex
);
290 static void wlcore_adjust_conf(struct wl1271
*wl
)
292 /* Adjust settings according to optional module parameters */
294 /* Firmware Logger params */
295 if (fwlog_mem_blocks
!= -1) {
296 if (fwlog_mem_blocks
>= CONF_FWLOG_MIN_MEM_BLOCKS
&&
297 fwlog_mem_blocks
<= CONF_FWLOG_MAX_MEM_BLOCKS
) {
298 wl
->conf
.fwlog
.mem_blocks
= fwlog_mem_blocks
;
301 "Illegal fwlog_mem_blocks=%d using default %d",
302 fwlog_mem_blocks
, wl
->conf
.fwlog
.mem_blocks
);
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
322 if (bug_on_recovery
!= -1)
323 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
325 if (no_recovery
!= -1)
326 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
329 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
330 struct wl12xx_vif
*wlvif
,
335 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
338 * Wake up from high level PS if the STA is asleep with too little
339 * packets in FW or if the STA is awake.
341 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
342 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
345 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 * Make an exception if this is the only connected link. In this
347 * case FW-memory congestion is less of a problem.
348 * Note that a single connected STA means 3 active links, since we must
349 * account for the global and broadcast AP links. The "fw_ps" check
350 * assures us the third link is a STA connected to the AP. Otherwise
351 * the FW would not set the PSM bit.
353 else if (wl
->active_link_count
> 3 && fw_ps
&&
354 tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
355 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
358 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
359 struct wl12xx_vif
*wlvif
,
360 struct wl_fw_status
*status
)
365 cur_fw_ps_map
= status
->link_ps_bitmap
;
366 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
367 wl1271_debug(DEBUG_PSM
,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
370 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
372 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
375 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, wl
->num_links
)
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 wl
->links
[hlid
].allocated_pkts
);
380 static int wlcore_fw_status(struct wl1271
*wl
, struct wl_fw_status
*status
)
382 struct wl12xx_vif
*wlvif
;
384 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
385 int avail
, freed_blocks
;
388 struct wl1271_link
*lnk
;
390 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
,
392 wl
->fw_status_len
, false);
396 wlcore_hw_convert_fw_status(wl
, wl
->raw_fw_status
, wl
->fw_status
);
398 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
399 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status
->fw_rx_counter
,
402 status
->drv_rx_counter
,
403 status
->tx_results_counter
);
405 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
406 /* prevent wrap-around in freed-packets counter */
407 wl
->tx_allocated_pkts
[i
] -=
408 (status
->counters
.tx_released_pkts
[i
] -
409 wl
->tx_pkts_freed
[i
]) & 0xff;
411 wl
->tx_pkts_freed
[i
] = status
->counters
.tx_released_pkts
[i
];
415 for_each_set_bit(i
, wl
->links_map
, wl
->num_links
) {
419 /* prevent wrap-around in freed-packets counter */
420 diff
= (status
->counters
.tx_lnk_free_pkts
[i
] -
421 lnk
->prev_freed_pkts
) & 0xff;
426 lnk
->allocated_pkts
-= diff
;
427 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[i
];
429 /* accumulate the prev_freed_pkts counter */
430 lnk
->total_freed_pkts
+= diff
;
433 /* prevent wrap-around in total blocks counter */
434 if (likely(wl
->tx_blocks_freed
<= status
->total_released_blks
))
435 freed_blocks
= status
->total_released_blks
-
438 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
439 status
->total_released_blks
;
441 wl
->tx_blocks_freed
= status
->total_released_blks
;
443 wl
->tx_allocated_blocks
-= freed_blocks
;
446 * If the FW freed some blocks:
447 * If we still have allocated blocks - re-arm the timer, Tx is
448 * not stuck. Otherwise, cancel the timer (no Tx currently).
451 if (wl
->tx_allocated_blocks
)
452 wl12xx_rearm_tx_watchdog_locked(wl
);
454 cancel_delayed_work(&wl
->tx_watchdog_work
);
457 avail
= status
->tx_total
- wl
->tx_allocated_blocks
;
460 * The FW might change the total number of TX memblocks before
461 * we get a notification about blocks being released. Thus, the
462 * available blocks calculation might yield a temporary result
463 * which is lower than the actual available blocks. Keeping in
464 * mind that only blocks that were allocated can be moved from
465 * TX to RX, tx_blocks_available should never decrease here.
467 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
470 /* if more blocks are available now, tx work can be scheduled */
471 if (wl
->tx_blocks_available
> old_tx_blk_count
)
472 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
474 /* for AP update num of allocated TX blocks per link and ps status */
475 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
476 wl12xx_irq_update_links_status(wl
, wlvif
, status
);
479 /* update the host-chipset time offset */
481 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
482 (s64
)(status
->fw_localtime
);
484 wl
->fw_fast_lnk_map
= status
->link_fast_bitmap
;
489 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
493 /* Pass all received frames to the network stack */
494 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
495 ieee80211_rx_ni(wl
->hw
, skb
);
497 /* Return sent skbs to the network stack */
498 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
499 ieee80211_tx_status_ni(wl
->hw
, skb
);
502 static void wl1271_netstack_work(struct work_struct
*work
)
505 container_of(work
, struct wl1271
, netstack_work
);
508 wl1271_flush_deferred_work(wl
);
509 } while (skb_queue_len(&wl
->deferred_rx_queue
));
512 #define WL1271_IRQ_MAX_LOOPS 256
514 static int wlcore_irq_locked(struct wl1271
*wl
)
518 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
520 unsigned int defer_count
;
524 * In case edge triggered interrupt must be used, we cannot iterate
525 * more than once without introducing race conditions with the hardirq.
527 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
530 wl1271_debug(DEBUG_IRQ
, "IRQ work");
532 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
535 ret
= wl1271_ps_elp_wakeup(wl
);
539 while (!done
&& loopcount
--) {
541 * In order to avoid a race with the hardirq, clear the flag
542 * before acknowledging the chip. Since the mutex is held,
543 * wl1271_ps_elp_wakeup cannot be called concurrently.
545 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
546 smp_mb__after_clear_bit();
548 ret
= wlcore_fw_status(wl
, wl
->fw_status
);
552 wlcore_hw_tx_immediate_compl(wl
);
554 intr
= wl
->fw_status
->intr
;
555 intr
&= WLCORE_ALL_INTR_MASK
;
561 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
562 wl1271_error("HW watchdog interrupt received! starting recovery.");
563 wl
->watchdog_recovery
= true;
566 /* restarting the chip. ignore any other interrupt. */
570 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
571 wl1271_error("SW watchdog interrupt received! "
572 "starting recovery.");
573 wl
->watchdog_recovery
= true;
576 /* restarting the chip. ignore any other interrupt. */
580 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
581 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
583 ret
= wlcore_rx(wl
, wl
->fw_status
);
587 /* Check if any tx blocks were freed */
588 spin_lock_irqsave(&wl
->wl_lock
, flags
);
589 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
590 wl1271_tx_total_queue_count(wl
) > 0) {
591 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
593 * In order to avoid starvation of the TX path,
594 * call the work function directly.
596 ret
= wlcore_tx_work_locked(wl
);
600 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
603 /* check for tx results */
604 ret
= wlcore_hw_tx_delayed_compl(wl
);
608 /* Make sure the deferred queues don't get too long */
609 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
610 skb_queue_len(&wl
->deferred_rx_queue
);
611 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
612 wl1271_flush_deferred_work(wl
);
615 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
616 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
617 ret
= wl1271_event_handle(wl
, 0);
622 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
623 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
624 ret
= wl1271_event_handle(wl
, 1);
629 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
630 wl1271_debug(DEBUG_IRQ
,
631 "WL1271_ACX_INTR_INIT_COMPLETE");
633 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
634 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
637 wl1271_ps_elp_sleep(wl
);
643 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
647 struct wl1271
*wl
= cookie
;
649 /* complete the ELP completion */
650 spin_lock_irqsave(&wl
->wl_lock
, flags
);
651 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
653 complete(wl
->elp_compl
);
654 wl
->elp_compl
= NULL
;
657 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
658 /* don't enqueue a work right now. mark it as pending */
659 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
660 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
661 disable_irq_nosync(wl
->irq
);
662 pm_wakeup_event(wl
->dev
, 0);
663 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
666 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
668 /* TX might be handled here, avoid redundant work */
669 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
670 cancel_work_sync(&wl
->tx_work
);
672 mutex_lock(&wl
->mutex
);
674 ret
= wlcore_irq_locked(wl
);
676 wl12xx_queue_recovery_work(wl
);
678 spin_lock_irqsave(&wl
->wl_lock
, flags
);
679 /* In case TX was not handled here, queue TX work */
680 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
681 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
682 wl1271_tx_total_queue_count(wl
) > 0)
683 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
684 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
686 mutex_unlock(&wl
->mutex
);
691 struct vif_counter_data
{
694 struct ieee80211_vif
*cur_vif
;
695 bool cur_vif_running
;
698 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
699 struct ieee80211_vif
*vif
)
701 struct vif_counter_data
*counter
= data
;
704 if (counter
->cur_vif
== vif
)
705 counter
->cur_vif_running
= true;
708 /* caller must not hold wl->mutex, as it might deadlock */
709 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
710 struct ieee80211_vif
*cur_vif
,
711 struct vif_counter_data
*data
)
713 memset(data
, 0, sizeof(*data
));
714 data
->cur_vif
= cur_vif
;
716 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
717 wl12xx_vif_count_iter
, data
);
720 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
722 const struct firmware
*fw
;
724 enum wl12xx_fw_type fw_type
;
728 fw_type
= WL12XX_FW_TYPE_PLT
;
729 fw_name
= wl
->plt_fw_name
;
732 * we can't call wl12xx_get_vif_count() here because
733 * wl->mutex is taken, so use the cached last_vif_count value
735 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
736 fw_type
= WL12XX_FW_TYPE_MULTI
;
737 fw_name
= wl
->mr_fw_name
;
739 fw_type
= WL12XX_FW_TYPE_NORMAL
;
740 fw_name
= wl
->sr_fw_name
;
744 if (wl
->fw_type
== fw_type
)
747 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
749 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
752 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
757 wl1271_error("firmware size is not multiple of 32 bits: %zu",
764 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
765 wl
->fw_len
= fw
->size
;
766 wl
->fw
= vmalloc(wl
->fw_len
);
769 wl1271_error("could not allocate memory for the firmware");
774 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
776 wl
->fw_type
= fw_type
;
778 release_firmware(fw
);
783 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
785 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
787 /* Avoid a recursive recovery */
788 if (wl
->state
== WLCORE_STATE_ON
) {
789 wl
->state
= WLCORE_STATE_RESTARTING
;
790 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
791 wl1271_ps_elp_wakeup(wl
);
792 wlcore_disable_interrupts_nosync(wl
);
793 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
797 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
801 /* Make sure we have enough room */
802 len
= min(maxlen
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
804 /* Fill the FW log file, consumed by the sysfs fwlog entry */
805 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
806 wl
->fwlog_size
+= len
;
811 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
813 struct wlcore_partition_set part
, old_part
;
820 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
821 (wl
->conf
.fwlog
.mem_blocks
== 0))
824 wl1271_info("Reading FW panic log");
826 block
= kmalloc(wl
->fw_mem_block_size
, GFP_KERNEL
);
831 * Make sure the chip is awake and the logger isn't active.
832 * Do not send a stop fwlog command if the fw is hanged or if
833 * dbgpins are used (due to some fw bug).
835 if (wl1271_ps_elp_wakeup(wl
))
837 if (!wl
->watchdog_recovery
&&
838 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
839 wl12xx_cmd_stop_fwlog(wl
);
841 /* Read the first memory block address */
842 ret
= wlcore_fw_status(wl
, wl
->fw_status
);
846 addr
= wl
->fw_status
->log_start_addr
;
850 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
851 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
852 end_of_log
= wl
->fwlog_end
;
854 offset
= sizeof(addr
);
858 old_part
= wl
->curr_part
;
859 memset(&part
, 0, sizeof(part
));
861 /* Traverse the memory blocks linked list */
863 part
.mem
.start
= wlcore_hw_convert_hwaddr(wl
, addr
);
864 part
.mem
.size
= PAGE_SIZE
;
866 ret
= wlcore_set_partition(wl
, &part
);
868 wl1271_error("%s: set_partition start=0x%X size=%d",
869 __func__
, part
.mem
.start
, part
.mem
.size
);
873 memset(block
, 0, wl
->fw_mem_block_size
);
874 ret
= wlcore_read_hwaddr(wl
, addr
, block
,
875 wl
->fw_mem_block_size
, false);
881 * Memory blocks are linked to one another. The first 4 bytes
882 * of each memory block hold the hardware address of the next
883 * one. The last memory block points to the first one in
884 * on demand mode and is equal to 0x2000000 in continuous mode.
886 addr
= le32_to_cpup((__le32
*)block
);
888 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
889 wl
->fw_mem_block_size
- offset
))
891 } while (addr
&& (addr
!= end_of_log
));
893 wake_up_interruptible(&wl
->fwlog_waitq
);
897 wlcore_set_partition(wl
, &old_part
);
900 static void wlcore_print_recovery(struct wl1271
*wl
)
906 wl1271_info("Hardware recovery in progress. FW ver: %s",
907 wl
->chip
.fw_ver_str
);
909 /* change partitions momentarily so we can read the FW pc */
910 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
914 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
918 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
922 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
923 pc
, hint_sts
, ++wl
->recovery_count
);
925 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
929 static void wl1271_recovery_work(struct work_struct
*work
)
932 container_of(work
, struct wl1271
, recovery_work
);
933 struct wl12xx_vif
*wlvif
;
934 struct ieee80211_vif
*vif
;
936 mutex_lock(&wl
->mutex
);
938 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
941 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
942 if (wl
->conf
.fwlog
.output
== WL12XX_FWLOG_OUTPUT_HOST
)
943 wl12xx_read_fwlog_panic(wl
);
944 wlcore_print_recovery(wl
);
947 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
948 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
950 if (wl
->conf
.recovery
.no_recovery
) {
951 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
955 /* Prevent spurious TX during FW restart */
956 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
958 /* reboot the chipset */
959 while (!list_empty(&wl
->wlvif_list
)) {
960 wlvif
= list_first_entry(&wl
->wlvif_list
,
961 struct wl12xx_vif
, list
);
962 vif
= wl12xx_wlvif_to_vif(wlvif
);
963 __wl1271_op_remove_interface(wl
, vif
, false);
966 wlcore_op_stop_locked(wl
);
968 ieee80211_restart_hw(wl
->hw
);
971 * Its safe to enable TX now - the queues are stopped after a request
974 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
977 wl
->watchdog_recovery
= false;
978 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
979 mutex_unlock(&wl
->mutex
);
982 static int wlcore_fw_wakeup(struct wl1271
*wl
)
984 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
987 static int wl1271_setup(struct wl1271
*wl
)
989 wl
->raw_fw_status
= kzalloc(wl
->fw_status_len
, GFP_KERNEL
);
990 if (!wl
->raw_fw_status
)
993 wl
->fw_status
= kzalloc(sizeof(*wl
->fw_status
), GFP_KERNEL
);
997 wl
->tx_res_if
= kzalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
1003 kfree(wl
->fw_status
);
1004 kfree(wl
->raw_fw_status
);
1008 static int wl12xx_set_power_on(struct wl1271
*wl
)
1012 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1013 ret
= wl1271_power_on(wl
);
1016 msleep(WL1271_POWER_ON_SLEEP
);
1017 wl1271_io_reset(wl
);
1020 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1024 /* ELP module wake up */
1025 ret
= wlcore_fw_wakeup(wl
);
1033 wl1271_power_off(wl
);
1037 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1041 ret
= wl12xx_set_power_on(wl
);
1046 * For wl127x based devices we could use the default block
1047 * size (512 bytes), but due to a bug in the sdio driver, we
1048 * need to set it explicitly after the chip is powered on. To
1049 * simplify the code and since the performance impact is
1050 * negligible, we use the same block size for all different
1053 * Check if the bus supports blocksize alignment and, if it
1054 * doesn't, make sure we don't have the quirk.
1056 if (!wl1271_set_block_size(wl
))
1057 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1059 /* TODO: make sure the lower driver has set things up correctly */
1061 ret
= wl1271_setup(wl
);
1065 ret
= wl12xx_fetch_firmware(wl
, plt
);
1073 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1075 int retries
= WL1271_BOOT_RETRIES
;
1076 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1078 static const char* const PLT_MODE
[] = {
1087 mutex_lock(&wl
->mutex
);
1089 wl1271_notice("power up");
1091 if (wl
->state
!= WLCORE_STATE_OFF
) {
1092 wl1271_error("cannot go into PLT state because not "
1093 "in off state: %d", wl
->state
);
1098 /* Indicate to lower levels that we are now in PLT mode */
1100 wl
->plt_mode
= plt_mode
;
1104 ret
= wl12xx_chip_wakeup(wl
, true);
1108 if (plt_mode
!= PLT_CHIP_AWAKE
) {
1109 ret
= wl
->ops
->plt_init(wl
);
1114 wl
->state
= WLCORE_STATE_ON
;
1115 wl1271_notice("firmware booted in PLT mode %s (%s)",
1117 wl
->chip
.fw_ver_str
);
1119 /* update hw/fw version info in wiphy struct */
1120 wiphy
->hw_version
= wl
->chip
.id
;
1121 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1122 sizeof(wiphy
->fw_version
));
1127 wl1271_power_off(wl
);
1131 wl
->plt_mode
= PLT_OFF
;
1133 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1134 WL1271_BOOT_RETRIES
);
1136 mutex_unlock(&wl
->mutex
);
1141 int wl1271_plt_stop(struct wl1271
*wl
)
1145 wl1271_notice("power down");
1148 * Interrupts must be disabled before setting the state to OFF.
1149 * Otherwise, the interrupt handler might be called and exit without
1150 * reading the interrupt status.
1152 wlcore_disable_interrupts(wl
);
1153 mutex_lock(&wl
->mutex
);
1155 mutex_unlock(&wl
->mutex
);
1158 * This will not necessarily enable interrupts as interrupts
1159 * may have been disabled when op_stop was called. It will,
1160 * however, balance the above call to disable_interrupts().
1162 wlcore_enable_interrupts(wl
);
1164 wl1271_error("cannot power down because not in PLT "
1165 "state: %d", wl
->state
);
1170 mutex_unlock(&wl
->mutex
);
1172 wl1271_flush_deferred_work(wl
);
1173 cancel_work_sync(&wl
->netstack_work
);
1174 cancel_work_sync(&wl
->recovery_work
);
1175 cancel_delayed_work_sync(&wl
->elp_work
);
1176 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1178 mutex_lock(&wl
->mutex
);
1179 wl1271_power_off(wl
);
1181 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1182 wl
->state
= WLCORE_STATE_OFF
;
1184 wl
->plt_mode
= PLT_OFF
;
1186 mutex_unlock(&wl
->mutex
);
1192 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1193 struct ieee80211_tx_control
*control
,
1194 struct sk_buff
*skb
)
1196 struct wl1271
*wl
= hw
->priv
;
1197 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1198 struct ieee80211_vif
*vif
= info
->control
.vif
;
1199 struct wl12xx_vif
*wlvif
= NULL
;
1200 unsigned long flags
;
1205 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1206 ieee80211_free_txskb(hw
, skb
);
1210 wlvif
= wl12xx_vif_to_data(vif
);
1211 mapping
= skb_get_queue_mapping(skb
);
1212 q
= wl1271_tx_get_queue(mapping
);
1214 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1216 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1219 * drop the packet if the link is invalid or the queue is stopped
1220 * for any reason but watermark. Watermark is a "soft"-stop so we
1221 * allow these packets through.
1223 if (hlid
== WL12XX_INVALID_LINK_ID
||
1224 (!test_bit(hlid
, wlvif
->links_map
)) ||
1225 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1226 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1227 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1228 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1229 ieee80211_free_txskb(hw
, skb
);
1233 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1235 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1237 wl
->tx_queue_count
[q
]++;
1238 wlvif
->tx_queue_count
[q
]++;
1241 * The workqueue is slow to process the tx_queue and we need stop
1242 * the queue here, otherwise the queue will get too long.
1244 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1245 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1246 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1247 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1248 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1249 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1253 * The chip specific setup must run before the first TX packet -
1254 * before that, the tx_work will not be initialized!
1257 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1258 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1259 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1262 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1265 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1267 unsigned long flags
;
1270 /* no need to queue a new dummy packet if one is already pending */
1271 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1274 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1276 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1277 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1278 wl
->tx_queue_count
[q
]++;
1279 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1281 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1282 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1283 return wlcore_tx_work_locked(wl
);
1286 * If the FW TX is busy, TX work will be scheduled by the threaded
1287 * interrupt handler function
1293 * The size of the dummy packet should be at least 1400 bytes. However, in
1294 * order to minimize the number of bus transactions, aligning it to 512 bytes
1295 * boundaries could be beneficial, performance wise
1297 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1299 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1301 struct sk_buff
*skb
;
1302 struct ieee80211_hdr_3addr
*hdr
;
1303 unsigned int dummy_packet_size
;
1305 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1306 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1308 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1310 wl1271_warning("Failed to allocate a dummy packet skb");
1314 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1316 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1317 memset(hdr
, 0, sizeof(*hdr
));
1318 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1319 IEEE80211_STYPE_NULLFUNC
|
1320 IEEE80211_FCTL_TODS
);
1322 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1324 /* Dummy packets require the TID to be management */
1325 skb
->priority
= WL1271_TID_MGMT
;
1327 /* Initialize all fields that might be used */
1328 skb_set_queue_mapping(skb
, 0);
1329 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1337 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern
*p
)
1339 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1340 int i
, pattern_len
= 0;
1343 wl1271_warning("No mask in WoWLAN pattern");
1348 * The pattern is broken up into segments of bytes at different offsets
1349 * that need to be checked by the FW filter. Each segment is called
1350 * a field in the FW API. We verify that the total number of fields
1351 * required for this pattern won't exceed FW limits (8)
1352 * as well as the total fields buffer won't exceed the FW limit.
1353 * Note that if there's a pattern which crosses Ethernet/IP header
1354 * boundary a new field is required.
1356 for (i
= 0; i
< p
->pattern_len
; i
++) {
1357 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1362 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1364 fields_size
+= pattern_len
+
1365 RX_FILTER_FIELD_OVERHEAD
;
1373 fields_size
+= pattern_len
+
1374 RX_FILTER_FIELD_OVERHEAD
;
1381 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1385 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1386 wl1271_warning("RX Filter too complex. Too many segments");
1390 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1391 wl1271_warning("RX filter pattern is too big");
1398 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1400 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1403 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1410 for (i
= 0; i
< filter
->num_fields
; i
++)
1411 kfree(filter
->fields
[i
].pattern
);
1416 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1417 u16 offset
, u8 flags
,
1418 u8
*pattern
, u8 len
)
1420 struct wl12xx_rx_filter_field
*field
;
1422 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1423 wl1271_warning("Max fields per RX filter. can't alloc another");
1427 field
= &filter
->fields
[filter
->num_fields
];
1429 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1430 if (!field
->pattern
) {
1431 wl1271_warning("Failed to allocate RX filter pattern");
1435 filter
->num_fields
++;
1437 field
->offset
= cpu_to_le16(offset
);
1438 field
->flags
= flags
;
1440 memcpy(field
->pattern
, pattern
, len
);
1445 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1447 int i
, fields_size
= 0;
1449 for (i
= 0; i
< filter
->num_fields
; i
++)
1450 fields_size
+= filter
->fields
[i
].len
+
1451 sizeof(struct wl12xx_rx_filter_field
) -
1457 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1461 struct wl12xx_rx_filter_field
*field
;
1463 for (i
= 0; i
< filter
->num_fields
; i
++) {
1464 field
= (struct wl12xx_rx_filter_field
*)buf
;
1466 field
->offset
= filter
->fields
[i
].offset
;
1467 field
->flags
= filter
->fields
[i
].flags
;
1468 field
->len
= filter
->fields
[i
].len
;
1470 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1471 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1472 sizeof(u8
*) + field
->len
;
1477 * Allocates an RX filter returned through f
1478 * which needs to be freed using rx_filter_free()
1481 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern
*p
,
1482 struct wl12xx_rx_filter
**f
)
1485 struct wl12xx_rx_filter
*filter
;
1489 filter
= wl1271_rx_filter_alloc();
1491 wl1271_warning("Failed to alloc rx filter");
1497 while (i
< p
->pattern_len
) {
1498 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1503 for (j
= i
; j
< p
->pattern_len
; j
++) {
1504 if (!test_bit(j
, (unsigned long *)p
->mask
))
1507 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1508 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1512 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1514 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1516 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1517 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1522 ret
= wl1271_rx_filter_alloc_field(filter
,
1525 &p
->pattern
[i
], len
);
1532 filter
->action
= FILTER_SIGNAL
;
1538 wl1271_rx_filter_free(filter
);
1544 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1545 struct cfg80211_wowlan
*wow
)
1549 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1550 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1555 ret
= wl1271_rx_filter_clear_all(wl
);
1562 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1565 /* Validate all incoming patterns before clearing current FW state */
1566 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1567 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1569 wl1271_warning("Bad wowlan pattern %d", i
);
1574 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1578 ret
= wl1271_rx_filter_clear_all(wl
);
1582 /* Translate WoWLAN patterns into filters */
1583 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1584 struct cfg80211_pkt_pattern
*p
;
1585 struct wl12xx_rx_filter
*filter
= NULL
;
1587 p
= &wow
->patterns
[i
];
1589 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1591 wl1271_warning("Failed to create an RX filter from "
1592 "wowlan pattern %d", i
);
1596 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1598 wl1271_rx_filter_free(filter
);
1603 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1609 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1610 struct wl12xx_vif
*wlvif
,
1611 struct cfg80211_wowlan
*wow
)
1615 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1618 ret
= wl1271_ps_elp_wakeup(wl
);
1622 ret
= wl1271_configure_wowlan(wl
, wow
);
1626 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1627 wl
->conf
.conn
.wake_up_event
) &&
1628 (wl
->conf
.conn
.suspend_listen_interval
==
1629 wl
->conf
.conn
.listen_interval
))
1632 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1633 wl
->conf
.conn
.suspend_wake_up_event
,
1634 wl
->conf
.conn
.suspend_listen_interval
);
1637 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1640 wl1271_ps_elp_sleep(wl
);
1646 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1647 struct wl12xx_vif
*wlvif
)
1651 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1654 ret
= wl1271_ps_elp_wakeup(wl
);
1658 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1660 wl1271_ps_elp_sleep(wl
);
1666 static int wl1271_configure_suspend(struct wl1271
*wl
,
1667 struct wl12xx_vif
*wlvif
,
1668 struct cfg80211_wowlan
*wow
)
1670 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1671 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1672 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1673 return wl1271_configure_suspend_ap(wl
, wlvif
);
1677 static void wl1271_configure_resume(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1680 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1681 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1683 if ((!is_ap
) && (!is_sta
))
1686 if (is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1689 ret
= wl1271_ps_elp_wakeup(wl
);
1694 wl1271_configure_wowlan(wl
, NULL
);
1696 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1697 wl
->conf
.conn
.wake_up_event
) &&
1698 (wl
->conf
.conn
.suspend_listen_interval
==
1699 wl
->conf
.conn
.listen_interval
))
1702 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1703 wl
->conf
.conn
.wake_up_event
,
1704 wl
->conf
.conn
.listen_interval
);
1707 wl1271_error("resume: wake up conditions failed: %d",
1711 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1715 wl1271_ps_elp_sleep(wl
);
1718 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1719 struct cfg80211_wowlan
*wow
)
1721 struct wl1271
*wl
= hw
->priv
;
1722 struct wl12xx_vif
*wlvif
;
1725 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1728 /* we want to perform the recovery before suspending */
1729 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1730 wl1271_warning("postponing suspend to perform recovery");
1734 wl1271_tx_flush(wl
);
1736 mutex_lock(&wl
->mutex
);
1737 wl
->wow_enabled
= true;
1738 wl12xx_for_each_wlvif(wl
, wlvif
) {
1739 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1741 mutex_unlock(&wl
->mutex
);
1742 wl1271_warning("couldn't prepare device to suspend");
1746 mutex_unlock(&wl
->mutex
);
1747 /* flush any remaining work */
1748 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1751 * disable and re-enable interrupts in order to flush
1754 wlcore_disable_interrupts(wl
);
1757 * set suspended flag to avoid triggering a new threaded_irq
1758 * work. no need for spinlock as interrupts are disabled.
1760 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1762 wlcore_enable_interrupts(wl
);
1763 flush_work(&wl
->tx_work
);
1764 flush_delayed_work(&wl
->elp_work
);
1767 * Cancel the watchdog even if above tx_flush failed. We will detect
1768 * it on resume anyway.
1770 cancel_delayed_work(&wl
->tx_watchdog_work
);
1775 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1777 struct wl1271
*wl
= hw
->priv
;
1778 struct wl12xx_vif
*wlvif
;
1779 unsigned long flags
;
1780 bool run_irq_work
= false, pending_recovery
;
1783 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1785 WARN_ON(!wl
->wow_enabled
);
1788 * re-enable irq_work enqueuing, and call irq_work directly if
1789 * there is a pending work.
1791 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1792 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1793 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1794 run_irq_work
= true;
1795 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1797 mutex_lock(&wl
->mutex
);
1799 /* test the recovery flag before calling any SDIO functions */
1800 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1804 wl1271_debug(DEBUG_MAC80211
,
1805 "run postponed irq_work directly");
1807 /* don't talk to the HW if recovery is pending */
1808 if (!pending_recovery
) {
1809 ret
= wlcore_irq_locked(wl
);
1811 wl12xx_queue_recovery_work(wl
);
1814 wlcore_enable_interrupts(wl
);
1817 if (pending_recovery
) {
1818 wl1271_warning("queuing forgotten recovery on resume");
1819 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1823 wl12xx_for_each_wlvif(wl
, wlvif
) {
1824 wl1271_configure_resume(wl
, wlvif
);
1828 wl
->wow_enabled
= false;
1831 * Set a flag to re-init the watchdog on the first Tx after resume.
1832 * That way we avoid possible conditions where Tx-complete interrupts
1833 * fail to arrive and we perform a spurious recovery.
1835 set_bit(WL1271_FLAG_REINIT_TX_WDOG
, &wl
->flags
);
1836 mutex_unlock(&wl
->mutex
);
1842 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1844 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1847 * We have to delay the booting of the hardware because
1848 * we need to know the local MAC address before downloading and
1849 * initializing the firmware. The MAC address cannot be changed
1850 * after boot, and without the proper MAC address, the firmware
1851 * will not function properly.
1853 * The MAC address is first known when the corresponding interface
1854 * is added. That is where we will initialize the hardware.
1860 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1864 if (wl
->state
== WLCORE_STATE_OFF
) {
1865 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1867 wlcore_enable_interrupts(wl
);
1873 * this must be before the cancel_work calls below, so that the work
1874 * functions don't perform further work.
1876 wl
->state
= WLCORE_STATE_OFF
;
1879 * Use the nosync variant to disable interrupts, so the mutex could be
1880 * held while doing so without deadlocking.
1882 wlcore_disable_interrupts_nosync(wl
);
1884 mutex_unlock(&wl
->mutex
);
1886 wlcore_synchronize_interrupts(wl
);
1887 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1888 cancel_work_sync(&wl
->recovery_work
);
1889 wl1271_flush_deferred_work(wl
);
1890 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1891 cancel_work_sync(&wl
->netstack_work
);
1892 cancel_work_sync(&wl
->tx_work
);
1893 cancel_delayed_work_sync(&wl
->elp_work
);
1894 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1896 /* let's notify MAC80211 about the remaining pending TX frames */
1897 mutex_lock(&wl
->mutex
);
1898 wl12xx_tx_reset(wl
);
1900 wl1271_power_off(wl
);
1902 * In case a recovery was scheduled, interrupts were disabled to avoid
1903 * an interrupt storm. Now that the power is down, it is safe to
1904 * re-enable interrupts to balance the disable depth
1906 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1907 wlcore_enable_interrupts(wl
);
1909 wl
->band
= IEEE80211_BAND_2GHZ
;
1912 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1913 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1914 wl
->tx_blocks_available
= 0;
1915 wl
->tx_allocated_blocks
= 0;
1916 wl
->tx_results_count
= 0;
1917 wl
->tx_packets_count
= 0;
1918 wl
->time_offset
= 0;
1919 wl
->ap_fw_ps_map
= 0;
1921 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1922 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1923 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1924 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1925 memset(wl
->session_ids
, 0, sizeof(wl
->session_ids
));
1926 memset(wl
->rx_filter_enabled
, 0, sizeof(wl
->rx_filter_enabled
));
1927 wl
->active_sta_count
= 0;
1928 wl
->active_link_count
= 0;
1930 /* The system link is always allocated */
1931 wl
->links
[WL12XX_SYSTEM_HLID
].allocated_pkts
= 0;
1932 wl
->links
[WL12XX_SYSTEM_HLID
].prev_freed_pkts
= 0;
1933 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1936 * this is performed after the cancel_work calls and the associated
1937 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1938 * get executed before all these vars have been reset.
1942 wl
->tx_blocks_freed
= 0;
1944 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1945 wl
->tx_pkts_freed
[i
] = 0;
1946 wl
->tx_allocated_pkts
[i
] = 0;
1949 wl1271_debugfs_reset(wl
);
1951 kfree(wl
->raw_fw_status
);
1952 wl
->raw_fw_status
= NULL
;
1953 kfree(wl
->fw_status
);
1954 wl
->fw_status
= NULL
;
1955 kfree(wl
->tx_res_if
);
1956 wl
->tx_res_if
= NULL
;
1957 kfree(wl
->target_mem_map
);
1958 wl
->target_mem_map
= NULL
;
1961 * FW channels must be re-calibrated after recovery,
1962 * save current Reg-Domain channel configuration and clear it.
1964 memcpy(wl
->reg_ch_conf_pending
, wl
->reg_ch_conf_last
,
1965 sizeof(wl
->reg_ch_conf_pending
));
1966 memset(wl
->reg_ch_conf_last
, 0, sizeof(wl
->reg_ch_conf_last
));
1969 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1971 struct wl1271
*wl
= hw
->priv
;
1973 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1975 mutex_lock(&wl
->mutex
);
1977 wlcore_op_stop_locked(wl
);
1979 mutex_unlock(&wl
->mutex
);
1982 static void wlcore_channel_switch_work(struct work_struct
*work
)
1984 struct delayed_work
*dwork
;
1986 struct ieee80211_vif
*vif
;
1987 struct wl12xx_vif
*wlvif
;
1990 dwork
= container_of(work
, struct delayed_work
, work
);
1991 wlvif
= container_of(dwork
, struct wl12xx_vif
, channel_switch_work
);
1994 wl1271_info("channel switch failed (role_id: %d).", wlvif
->role_id
);
1996 mutex_lock(&wl
->mutex
);
1998 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2001 /* check the channel switch is still ongoing */
2002 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
))
2005 vif
= wl12xx_wlvif_to_vif(wlvif
);
2006 ieee80211_chswitch_done(vif
, false);
2008 ret
= wl1271_ps_elp_wakeup(wl
);
2012 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2014 wl1271_ps_elp_sleep(wl
);
2016 mutex_unlock(&wl
->mutex
);
2019 static void wlcore_connection_loss_work(struct work_struct
*work
)
2021 struct delayed_work
*dwork
;
2023 struct ieee80211_vif
*vif
;
2024 struct wl12xx_vif
*wlvif
;
2026 dwork
= container_of(work
, struct delayed_work
, work
);
2027 wlvif
= container_of(dwork
, struct wl12xx_vif
, connection_loss_work
);
2030 wl1271_info("Connection loss work (role_id: %d).", wlvif
->role_id
);
2032 mutex_lock(&wl
->mutex
);
2034 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2037 /* Call mac80211 connection loss */
2038 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2041 vif
= wl12xx_wlvif_to_vif(wlvif
);
2042 ieee80211_connection_loss(vif
);
2044 mutex_unlock(&wl
->mutex
);
2047 static void wlcore_pending_auth_complete_work(struct work_struct
*work
)
2049 struct delayed_work
*dwork
;
2051 struct wl12xx_vif
*wlvif
;
2052 unsigned long time_spare
;
2055 dwork
= container_of(work
, struct delayed_work
, work
);
2056 wlvif
= container_of(dwork
, struct wl12xx_vif
,
2057 pending_auth_complete_work
);
2060 mutex_lock(&wl
->mutex
);
2062 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2066 * Make sure a second really passed since the last auth reply. Maybe
2067 * a second auth reply arrived while we were stuck on the mutex.
2068 * Check for a little less than the timeout to protect from scheduler
2071 time_spare
= jiffies
+
2072 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT
- 50);
2073 if (!time_after(time_spare
, wlvif
->pending_auth_reply_time
))
2076 ret
= wl1271_ps_elp_wakeup(wl
);
2080 /* cancel the ROC if active */
2081 wlcore_update_inconn_sta(wl
, wlvif
, NULL
, false);
2083 wl1271_ps_elp_sleep(wl
);
2085 mutex_unlock(&wl
->mutex
);
2088 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
2090 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
2091 WL12XX_MAX_RATE_POLICIES
);
2092 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
2095 __set_bit(policy
, wl
->rate_policies_map
);
2100 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
2102 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
2105 __clear_bit(*idx
, wl
->rate_policies_map
);
2106 *idx
= WL12XX_MAX_RATE_POLICIES
;
2109 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
2111 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
2112 WLCORE_MAX_KLV_TEMPLATES
);
2113 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
2116 __set_bit(policy
, wl
->klv_templates_map
);
2121 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
2123 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
2126 __clear_bit(*idx
, wl
->klv_templates_map
);
2127 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
2130 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2132 switch (wlvif
->bss_type
) {
2133 case BSS_TYPE_AP_BSS
:
2135 return WL1271_ROLE_P2P_GO
;
2137 return WL1271_ROLE_AP
;
2139 case BSS_TYPE_STA_BSS
:
2141 return WL1271_ROLE_P2P_CL
;
2143 return WL1271_ROLE_STA
;
2146 return WL1271_ROLE_IBSS
;
2149 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
2151 return WL12XX_INVALID_ROLE_TYPE
;
2154 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
2156 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2159 /* clear everything but the persistent data */
2160 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
2162 switch (ieee80211_vif_type_p2p(vif
)) {
2163 case NL80211_IFTYPE_P2P_CLIENT
:
2166 case NL80211_IFTYPE_STATION
:
2167 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2169 case NL80211_IFTYPE_ADHOC
:
2170 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2172 case NL80211_IFTYPE_P2P_GO
:
2175 case NL80211_IFTYPE_AP
:
2176 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2179 wlvif
->bss_type
= MAX_BSS_TYPE
;
2183 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2184 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2185 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2187 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2188 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2189 /* init sta/ibss data */
2190 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2191 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2192 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2193 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2194 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2195 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2196 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2197 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2200 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2201 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2202 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2203 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2204 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2205 wl12xx_allocate_rate_policy(wl
,
2206 &wlvif
->ap
.ucast_rate_idx
[i
]);
2207 wlvif
->basic_rate_set
= CONF_TX_ENABLED_RATES
;
2209 * TODO: check if basic_rate shouldn't be
2210 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2211 * instead (the same thing for STA above).
2213 wlvif
->basic_rate
= CONF_TX_ENABLED_RATES
;
2214 /* TODO: this seems to be used only for STA, check it */
2215 wlvif
->rate_set
= CONF_TX_ENABLED_RATES
;
2218 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2219 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2220 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2223 * mac80211 configures some values globally, while we treat them
2224 * per-interface. thus, on init, we have to copy them from wl
2226 wlvif
->band
= wl
->band
;
2227 wlvif
->channel
= wl
->channel
;
2228 wlvif
->power_level
= wl
->power_level
;
2229 wlvif
->channel_type
= wl
->channel_type
;
2231 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2232 wl1271_rx_streaming_enable_work
);
2233 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2234 wl1271_rx_streaming_disable_work
);
2235 INIT_DELAYED_WORK(&wlvif
->channel_switch_work
,
2236 wlcore_channel_switch_work
);
2237 INIT_DELAYED_WORK(&wlvif
->connection_loss_work
,
2238 wlcore_connection_loss_work
);
2239 INIT_DELAYED_WORK(&wlvif
->pending_auth_complete_work
,
2240 wlcore_pending_auth_complete_work
);
2241 INIT_LIST_HEAD(&wlvif
->list
);
2243 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2244 (unsigned long) wlvif
);
2248 static int wl12xx_init_fw(struct wl1271
*wl
)
2250 int retries
= WL1271_BOOT_RETRIES
;
2251 bool booted
= false;
2252 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2257 ret
= wl12xx_chip_wakeup(wl
, false);
2261 ret
= wl
->ops
->boot(wl
);
2265 ret
= wl1271_hw_init(wl
);
2273 mutex_unlock(&wl
->mutex
);
2274 /* Unlocking the mutex in the middle of handling is
2275 inherently unsafe. In this case we deem it safe to do,
2276 because we need to let any possibly pending IRQ out of
2277 the system (and while we are WLCORE_STATE_OFF the IRQ
2278 work function will not do anything.) Also, any other
2279 possible concurrent operations will fail due to the
2280 current state, hence the wl1271 struct should be safe. */
2281 wlcore_disable_interrupts(wl
);
2282 wl1271_flush_deferred_work(wl
);
2283 cancel_work_sync(&wl
->netstack_work
);
2284 mutex_lock(&wl
->mutex
);
2286 wl1271_power_off(wl
);
2290 wl1271_error("firmware boot failed despite %d retries",
2291 WL1271_BOOT_RETRIES
);
2295 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2297 /* update hw/fw version info in wiphy struct */
2298 wiphy
->hw_version
= wl
->chip
.id
;
2299 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2300 sizeof(wiphy
->fw_version
));
2303 * Now we know if 11a is supported (info from the NVS), so disable
2304 * 11a channels if not supported
2306 if (!wl
->enable_11a
)
2307 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2309 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2310 wl
->enable_11a
? "" : "not ");
2312 wl
->state
= WLCORE_STATE_ON
;
2317 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2319 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2323 * Check whether a fw switch (i.e. moving from one loaded
2324 * fw to another) is needed. This function is also responsible
2325 * for updating wl->last_vif_count, so it must be called before
2326 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2329 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2330 struct vif_counter_data vif_counter_data
,
2333 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2334 u8 vif_count
= vif_counter_data
.counter
;
2336 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2339 /* increase the vif count if this is a new vif */
2340 if (add
&& !vif_counter_data
.cur_vif_running
)
2343 wl
->last_vif_count
= vif_count
;
2345 /* no need for fw change if the device is OFF */
2346 if (wl
->state
== WLCORE_STATE_OFF
)
2349 /* no need for fw change if a single fw is used */
2350 if (!wl
->mr_fw_name
)
2353 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2355 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2362 * Enter "forced psm". Make sure the sta is in psm against the ap,
2363 * to make the fw switch a bit more disconnection-persistent.
2365 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2367 struct wl12xx_vif
*wlvif
;
2369 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2370 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2374 struct wlcore_hw_queue_iter_data
{
2375 unsigned long hw_queue_map
[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES
)];
2377 struct ieee80211_vif
*vif
;
2378 /* is the current vif among those iterated */
2382 static void wlcore_hw_queue_iter(void *data
, u8
*mac
,
2383 struct ieee80211_vif
*vif
)
2385 struct wlcore_hw_queue_iter_data
*iter_data
= data
;
2387 if (WARN_ON_ONCE(vif
->hw_queue
[0] == IEEE80211_INVAL_HW_QUEUE
))
2390 if (iter_data
->cur_running
|| vif
== iter_data
->vif
) {
2391 iter_data
->cur_running
= true;
2395 __set_bit(vif
->hw_queue
[0] / NUM_TX_QUEUES
, iter_data
->hw_queue_map
);
2398 static int wlcore_allocate_hw_queue_base(struct wl1271
*wl
,
2399 struct wl12xx_vif
*wlvif
)
2401 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2402 struct wlcore_hw_queue_iter_data iter_data
= {};
2405 iter_data
.vif
= vif
;
2407 /* mark all bits taken by active interfaces */
2408 ieee80211_iterate_active_interfaces_atomic(wl
->hw
,
2409 IEEE80211_IFACE_ITER_RESUME_ALL
,
2410 wlcore_hw_queue_iter
, &iter_data
);
2412 /* the current vif is already running in mac80211 (resume/recovery) */
2413 if (iter_data
.cur_running
) {
2414 wlvif
->hw_queue_base
= vif
->hw_queue
[0];
2415 wl1271_debug(DEBUG_MAC80211
,
2416 "using pre-allocated hw queue base %d",
2417 wlvif
->hw_queue_base
);
2419 /* interface type might have changed type */
2420 goto adjust_cab_queue
;
2423 q_base
= find_first_zero_bit(iter_data
.hw_queue_map
,
2424 WLCORE_NUM_MAC_ADDRESSES
);
2425 if (q_base
>= WLCORE_NUM_MAC_ADDRESSES
)
2428 wlvif
->hw_queue_base
= q_base
* NUM_TX_QUEUES
;
2429 wl1271_debug(DEBUG_MAC80211
, "allocating hw queue base: %d",
2430 wlvif
->hw_queue_base
);
2432 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2433 wl
->queue_stop_reasons
[wlvif
->hw_queue_base
+ i
] = 0;
2434 /* register hw queues in mac80211 */
2435 vif
->hw_queue
[i
] = wlvif
->hw_queue_base
+ i
;
2439 /* the last places are reserved for cab queues per interface */
2440 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2441 vif
->cab_queue
= NUM_TX_QUEUES
* WLCORE_NUM_MAC_ADDRESSES
+
2442 wlvif
->hw_queue_base
/ NUM_TX_QUEUES
;
2444 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2449 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2450 struct ieee80211_vif
*vif
)
2452 struct wl1271
*wl
= hw
->priv
;
2453 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2454 struct vif_counter_data vif_count
;
2459 wl1271_error("Adding Interface not allowed while in PLT mode");
2463 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2464 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2466 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2467 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2469 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2471 mutex_lock(&wl
->mutex
);
2472 ret
= wl1271_ps_elp_wakeup(wl
);
2477 * in some very corner case HW recovery scenarios its possible to
2478 * get here before __wl1271_op_remove_interface is complete, so
2479 * opt out if that is the case.
2481 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2482 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2488 ret
= wl12xx_init_vif_data(wl
, vif
);
2493 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2494 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2499 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2503 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2504 wl12xx_force_active_psm(wl
);
2505 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2506 mutex_unlock(&wl
->mutex
);
2507 wl1271_recovery_work(&wl
->recovery_work
);
2512 * TODO: after the nvs issue will be solved, move this block
2513 * to start(), and make sure here the driver is ON.
2515 if (wl
->state
== WLCORE_STATE_OFF
) {
2517 * we still need this in order to configure the fw
2518 * while uploading the nvs
2520 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2522 ret
= wl12xx_init_fw(wl
);
2527 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2528 role_type
, &wlvif
->role_id
);
2532 ret
= wl1271_init_vif_specific(wl
, vif
);
2536 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2537 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2539 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2544 wl1271_ps_elp_sleep(wl
);
2546 mutex_unlock(&wl
->mutex
);
2551 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2552 struct ieee80211_vif
*vif
,
2553 bool reset_tx_queues
)
2555 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2557 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2559 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2561 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2564 /* because of hardware recovery, we may get here twice */
2565 if (wl
->state
== WLCORE_STATE_OFF
)
2568 wl1271_info("down");
2570 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2571 wl
->scan_wlvif
== wlvif
) {
2573 * Rearm the tx watchdog just before idling scan. This
2574 * prevents just-finished scans from triggering the watchdog
2576 wl12xx_rearm_tx_watchdog_locked(wl
);
2578 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2579 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2580 wl
->scan_wlvif
= NULL
;
2581 wl
->scan
.req
= NULL
;
2582 ieee80211_scan_completed(wl
->hw
, true);
2585 if (wl
->sched_vif
== wlvif
) {
2586 ieee80211_sched_scan_stopped(wl
->hw
);
2587 wl
->sched_vif
= NULL
;
2590 if (wl
->roc_vif
== vif
) {
2592 ieee80211_remain_on_channel_expired(wl
->hw
);
2595 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2596 /* disable active roles */
2597 ret
= wl1271_ps_elp_wakeup(wl
);
2601 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2602 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2603 if (wl12xx_dev_role_started(wlvif
))
2604 wl12xx_stop_dev(wl
, wlvif
);
2607 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2611 wl1271_ps_elp_sleep(wl
);
2614 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2616 /* clear all hlids (except system_hlid) */
2617 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2619 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2620 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2621 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2622 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2623 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2624 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2625 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2627 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2628 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2629 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2630 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2631 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2632 wl12xx_free_rate_policy(wl
,
2633 &wlvif
->ap
.ucast_rate_idx
[i
]);
2634 wl1271_free_ap_keys(wl
, wlvif
);
2637 dev_kfree_skb(wlvif
->probereq
);
2638 wlvif
->probereq
= NULL
;
2639 if (wl
->last_wlvif
== wlvif
)
2640 wl
->last_wlvif
= NULL
;
2641 list_del(&wlvif
->list
);
2642 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2643 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2644 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2652 * Last AP, have more stations. Configure sleep auth according to STA.
2653 * Don't do thin on unintended recovery.
2655 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2656 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2659 if (wl
->ap_count
== 0 && is_ap
) {
2660 /* mask ap events */
2661 wl
->event_mask
&= ~wl
->ap_event_mask
;
2662 wl1271_event_unmask(wl
);
2665 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2666 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2667 /* Configure for power according to debugfs */
2668 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2669 wl1271_acx_sleep_auth(wl
, sta_auth
);
2670 /* Configure for ELP power saving */
2672 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2676 mutex_unlock(&wl
->mutex
);
2678 del_timer_sync(&wlvif
->rx_streaming_timer
);
2679 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2680 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2681 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
2682 cancel_delayed_work_sync(&wlvif
->channel_switch_work
);
2683 cancel_delayed_work_sync(&wlvif
->pending_auth_complete_work
);
2685 mutex_lock(&wl
->mutex
);
/*
 * mac80211 remove_interface callback: tear down one virtual interface.
 * Counts remaining vifs first so it can decide, after removal, whether a
 * different firmware image is needed (multi-role vs single-role).
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	/* snapshot vif counts before this one is removed */
	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	/* nothing to do if the chip is off or the vif never initialized */
	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		/* only remove the vif if it is still on wl's list */
		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		/* switch firmware via an intended (non-error) recovery */
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 change_interface callback: implemented as remove + re-add of
 * the vif with the new type. The VIF_CHANGE_IN_PROGRESS flag brackets the
 * sequence so other paths can tell this is not a real teardown.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	vif->p2p = p2p;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	return ret;
}
/*
 * Issue the firmware JOIN for a STA or IBSS vif by starting the
 * corresponding role. Returns 0 or a negative error from the role-start
 * command.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	else {
		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
			/*
			 * TODO: this is an ugly workaround for wl12xx fw
			 * bug - we are not able to tx/rx after the first
			 * start_sta, so make dummy start+stop calls,
			 * and then call start_sta again.
			 * this should be fixed in the fw.
			 */
			wl12xx_cmd_role_start_sta(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}

		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
	}

	return ret;
}
/*
 * Extract the SSID IE from a management frame in @skb (IEs start at
 * @offset) and cache it in @wlvif->ssid/ssid_len.
 * Returns 0, or a negative error if the IE is absent or too long.
 */
static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
			   int offset)
{
	u8 ssid_len;
	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
					 skb->len - offset);

	if (!ptr) {
		wl1271_error("No SSID in IEs!");
		return -ENOENT;
	}

	/* IE layout: ptr[0]=EID, ptr[1]=length, payload at ptr+2 */
	ssid_len = ptr[1];
	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
		wl1271_error("SSID is too long!");
		return -EINVAL;
	}

	wlvif->ssid_len = ssid_len;
	memcpy(wlvif->ssid, ptr+2, ssid_len);
	return 0;
}
/*
 * Learn the current SSID for a STA vif from the probe request mac80211
 * built for the AP we are associating with.
 */
static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	int ieoffset;

	/* we currently only support setting the ssid from the ap probe req */
	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
		return -EINVAL;

	skb = ieee80211_ap_probereq_get(wl->hw, vif);
	if (!skb)
		return -EINVAL;

	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	wl1271_ssid_set(wlvif, skb, ieoffset);
	/* the probe-req skb is ours to free after parsing */
	dev_kfree_skb(skb);

	return 0;
}
/*
 * Configure the firmware for a newly established STA association:
 * cache association parameters, build the maintenance templates and start
 * the keep-alive machinery. The order of the ACX commands below matters
 * (see the comment ahead of the keep-alive sequence).
 */
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	int ieoffset;
	int ret;

	/* cache association parameters from mac80211 */
	wlvif->aid = bss_conf->aid;
	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
	wlvif->beacon_int = bss_conf->beacon_int;
	wlvif->wmm_enabled = bss_conf->qos;

	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	/*
	 * Get a template for hardware connection maintenance
	 */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
							wlvif,
							NULL);
	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);

	/* enable the connection monitoring feature */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
	if (ret < 0)
		return ret;

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   wlvif->sta.klv_template_id,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;

	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
	if (ret < 0)
		return ret;

	if (sta_rate_set) {
		/* restrict tx rates to what the peer advertised */
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						    sta_rate_set,
						    wlvif->band);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	return ret;
}
/*
 * Undo wlcore_set_assoc(): drop association state for a STA vif (or the
 * joined state for IBSS), free the cached probe-request template and
 * disable the connection monitoring / keep-alive features.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	/* abort any channel switch still in flight for this vif */
	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2963 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2965 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2966 wlvif
->rate_set
= wlvif
->basic_rate_set
;
/*
 * Track mac80211's idle notification for a STA vif via the ACTIVE flag.
 * Entering idle also stops a scheduled scan owned by this vif, since the
 * firmware only supports sched_scan while idle.
 */
static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool idle)
{
	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);

	/* no state change - nothing to do */
	if (idle == cur_idle)
		return;

	if (idle) {
		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
		/* The current firmware only supports sched_scan in idle */
		if (wl->sched_vif == wlvif)
			wl->ops->sched_scan_stop(wl, wlvif);
	} else {
		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
	}
}
2988 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2989 struct ieee80211_conf
*conf
, u32 changed
)
2993 if (conf
->power_level
!= wlvif
->power_level
) {
2994 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2998 wlvif
->power_level
= conf
->power_level
;
/*
 * mac80211 config callback: cache the global power level and propagate
 * the change to every active vif, waking the chip around the update.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* remember the level even if the chip is currently off */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * Multicast filter snapshot built (in atomic context) by
 * wl1271_op_prepare_multicast() and later consumed by
 * wl1271_op_configure_filter(), which receives it via the u64 cookie.
 */
struct wl1271_filter_params {
	bool enabled;		/* false when the address list overflowed */
	int mc_list_length;	/* number of valid entries in mc_list */
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
/*
 * mac80211 prepare_multicast callback: snapshot the hardware multicast
 * list into a freshly allocated wl1271_filter_params and return it packed
 * into a u64 cookie (0 on allocation failure). Ownership passes to
 * wl1271_op_configure_filter().
 */
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
				       struct netdev_hw_addr_list *mc_list)
{
	struct wl1271_filter_params *fp;
	struct netdev_hw_addr *ha;

	/* GFP_ATOMIC: this callback may run in atomic context */
	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
	if (!fp) {
		wl1271_error("Out of memory setting filters.");
		return 0;
	}

	/* update multicast filtering parameters */
	fp->mc_list_length = 0;
	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
		/* too many addresses for the firmware table - disable it */
		fp->enabled = false;
	} else {
		fp->enabled = true;
		netdev_hw_addr_list_for_each(ha, mc_list) {
			memcpy(fp->mc_list[fp->mc_list_length],
					ha->addr, ETH_ALEN);
			fp->mc_list_length++;
		}
	}

	return (u64)(unsigned long)fp;
}
3080 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3083 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback: clamp the requested filter flags to
 * what we support and program the firmware group-address table for every
 * non-AP vif. The filter-params cookie from prepare_multicast is freed
 * here on all paths.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	/* unpack the cookie produced by wl1271_op_prepare_multicast() */
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				/* pass all multicast: disable the table */
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
/*
 * Record an AP group/pairwise key for later installation: keys configured
 * before the AP role is started cannot be pushed to the firmware yet, so
 * they are queued in wlvif->ap.recorded_keys and replayed by
 * wl1271_ap_init_hwenc() once the AP starts.
 */
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 id, u8 key_type, u8 key_size,
				const u8 *key, u8 hlid, u32 tx_seq_32,
				u16 tx_seq_16)
{
	struct wl1271_ap_key *ap_key;
	int i;

	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);

	if (key_size > MAX_KEY_SIZE)
		return -EINVAL;

	/*
	 * Find next free entry in ap_keys. Also check we are not replacing
	 * an existing key.
	 */
	for (i = 0; i < MAX_NUM_KEYS; i++) {
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		if (wlvif->ap.recorded_keys[i]->id == id) {
			wl1271_warning("trying to record key replacement");
			return -EINVAL;
		}
	}

	/* table full - cannot record any more keys */
	if (i == MAX_NUM_KEYS)
		return -EBUSY;

	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
	if (!ap_key)
		return -ENOMEM;

	ap_key->id = id;
	ap_key->key_type = key_type;
	ap_key->key_size = key_size;
	memcpy(ap_key->key, key, key_size);
	ap_key->hlid = hlid;
	ap_key->tx_seq_32 = tx_seq_32;
	ap_key->tx_seq_16 = tx_seq_16;

	wlvif->ap.recorded_keys[i] = ap_key;
	return 0;
}
3188 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3192 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3193 kfree(wlvif
->ap
.recorded_keys
[i
]);
3194 wlvif
->ap
.recorded_keys
[i
] = NULL
;
/*
 * Replay to the firmware all keys queued before the AP role started
 * (see wl1271_record_ap_key()). If any WEP key was installed, also set
 * the default WEP key on the broadcast link. The recorded-key table is
 * always freed, even on error.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a link go to the broadcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
/*
 * Low-level key programming for both AP and STA/IBSS vifs.
 * AP: target link is the station's hlid (or the broadcast hlid for group
 * keys); if the AP role has not started yet the key is only recorded.
 * STA: several remove requests are silently ignored because the firmware
 * clears unicast keys itself on the next JOIN.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		if (sta) {
			/* pairwise key: address the station's link */
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			/* group key: address the broadcast link */
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			/* AP not started yet - queue the key for later */
			ret = wl1271_record_ap_key(wl, wlvif, id,
						   key_type, key_size,
						   key, hlid, tx_seq_32,
						   tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
						    id, key_type, key_size,
						    key, hlid, tx_seq_32,
						    tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * mac80211 set_key callback: wrap the chip-specific set_key op with the
 * usual mutex/wakeup dance. TKIP and GEM keys change the firmware's
 * spare-block accounting, so the tx queues are stopped and flushed around
 * those updates to keep driver and firmware in sync.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * Generic key handling shared by the chip drivers (exported): resolve the
 * target link, derive the tx security sequence counters from the link's
 * freed-packet count, map the mac80211 cipher to a firmware key type and
 * add or remove the key. Changing the unicast/common key type also
 * rebuilds the ARP response template for STA vifs.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/* pick the link the key applies to */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	if (hlid != WL12XX_INVALID_LINK_ID) {
		/* seed the PN/IV counters from the link's tx history */
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		/* hardware generates the CCMP IV; mac80211 leaves room */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key callback: remember the default key
 * index and, when WEP is in use, push the new default to the firmware.
 */
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  int key_idx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
		     key_idx);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	wlvif->default_key = key_idx;

	/* the default WEP key needs to be configured at least once */
	if (wlvif->encryption_type == KEY_WEP) {
		ret = wl12xx_cmd_set_default_wep_key(wl,
						     key_idx,
						     wlvif->sta.hlid);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out_unlock:
	mutex_unlock(&wl->mutex);
}
/*
 * Push the current regulatory domain to the firmware on chips that need
 * it (WLCORE_QUIRK_REGDOMAIN_CONF). A failed update leaves the firmware
 * channel configuration unknown, so recovery is queued instead of
 * continuing.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 hw_scan callback: start a firmware scan using the first
 * requested SSID (if any). Scanning is refused while any role holds a
 * remain-on-channel, since the firmware cannot do both.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	/* only the first requested SSID is passed to the firmware */
	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * mac80211 cancel_hw_scan callback: stop an in-flight firmware scan,
 * reset the scan state machine and report completion (aborted=true) to
 * mac80211. The scan-complete work is cancelled outside the mutex since
 * it takes the same lock.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* no scan in progress - nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	/* true = scan was aborted */
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
/*
 * mac80211 sched_scan_start callback: delegate to the chip-specific
 * implementation and, on success, record which vif owns the scheduled
 * scan (only one may run at a time).
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_sched_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* this vif now owns the scheduled scan */
	wl->sched_vif = wlvif;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
/*
 * mac80211 sched_scan_stop callback: stop the chip's scheduled scan for
 * this vif, waking the chip around the operation.
 */
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl->ops->sched_scan_stop(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * mac80211 set_frag_threshold callback: program the fragmentation
 * threshold via ACX. A failure is only logged; mac80211 gets the error
 * back through the return value.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * mac80211 set_rts_threshold callback: apply the RTS threshold to every
 * active vif. Per-vif failures are logged but the loop keeps going so all
 * vifs get a chance to be updated.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
/*
 * Remove the first information element with id @eid from the frame in
 * @skb (IEs start at @ieoffset): close the gap with memmove and shrink
 * the skb accordingly. No-op if the IE is not present.
 */
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
	int len;
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
					skb->len - ieoffset);
	if (!ie)
		return;
	/* total IE size = header (2 bytes) + payload length */
	len = ie[1] + 2;
	next = ie + len;
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
}
/*
 * Like wl12xx_remove_ie() but for a vendor-specific IE identified by
 * @oui/@oui_type: cut the first matching IE out of @skb and trim it.
 */
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
				    unsigned int oui, u8 oui_type,
				    int ieoffset)
{
	int len;
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					       skb->data + ieoffset,
					       skb->len - ieoffset);
	if (!ie)
		return;
	/* total IE size = header (2 bytes) + payload length */
	len = ie[1] + 2;
	next = ie + len;
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
}
/*
 * Upload the probe response mac80211 built for this AP vif as the
 * firmware's AP probe-response template. On success the
 * AP_PROBE_RESP_SET flag is raised so the beacon path knows an explicit
 * template is in place.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data,
				      skb->len, 0,
				      rates);
	/* template contents were copied by the command - free the skb */
	dev_kfree_skb(skb);

	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_AP, "probe response updated");
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);

out:
	return ret;
}
/*
 * Legacy path for building the AP probe-response template from beacon
 * data. If the beacon carries a hidden/empty SSID, splice the real SSID
 * from bss_conf into a stack copy before uploading the template.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the (possibly empty) SSID IE in the source frame */
	ptr += (ptr[1] + 2);

	/* everything up to the SSID IE is copied verbatim */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the remaining IEs after the replaced SSID */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to the firmware. Preamble failures are not propagated;
 * slot-time and CTS-protect errors abort with the ACX status.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		/* preamble errors are intentionally ignored */
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
/*
 * Fetch the current beacon from mac80211, upload it as the beacon
 * template, and (unless usermode installed an explicit probe response)
 * derive a probe-response template from the same frame by stripping the
 * TIM and P2P IEs and rewriting the frame control. The beacon skb is
 * freed on every path.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	/* cache the SSID carried in the beacon */
	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
		  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* WMM is considered enabled iff the beacon carries the WMM IE */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the beacon frame into a probe response frame */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
							   beacon->data,
							   beacon->len,
							   min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	dev_kfree_skb(beacon);
out:
	return ret;
}
/*
 * Handle beacon-related bss_conf changes shared by AP and IBSS paths:
 * beacon interval, explicit AP probe-response, and the beacon template
 * itself.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
/* AP mode changes */
/*
 * Handle bss_info changes for an AP vif: basic rates (which also forces
 * template regeneration), beacon updates, starting/stopping the AP role
 * when beaconing is toggled, ERP parameters and HT operation mode.
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								    wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		/* rates changed - rebuild every template that embeds them */
		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				/* flush keys queued before role start */
				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
						    bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
/*
 * React to a BSSID change on a STA/IBSS vif: recompute the rate sets
 * from the BSS basic rates (and the peer's supported rates if known),
 * stop any scheduled scan owned by this vif, rebuild the null-data
 * templates and mark the vif in use.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						    sta_rate_set,
						    wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4171 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4175 /* revert back to minimum rates for the current band */
4176 wl1271_set_band_rate(wl
, wlvif
);
4177 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4179 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4183 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4184 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4185 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4190 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4193 /* STA/IBSS mode changes */
4194 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4195 struct ieee80211_vif
*vif
,
4196 struct ieee80211_bss_conf
*bss_conf
,
4199 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4200 bool do_join
= false;
4201 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4202 bool ibss_joined
= false;
4203 u32 sta_rate_set
= 0;
4205 struct ieee80211_sta
*sta
;
4206 bool sta_exists
= false;
4207 struct ieee80211_sta_ht_cap sta_ht_cap
;
4210 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4216 if (changed
& BSS_CHANGED_IBSS
) {
4217 if (bss_conf
->ibss_joined
) {
4218 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4221 wlcore_unset_assoc(wl
, wlvif
);
4222 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4226 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4229 /* Need to update the SSID (for filtering etc) */
4230 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4233 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4234 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4235 bss_conf
->enable_beacon
? "enabled" : "disabled");
4240 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
)
4241 wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
4243 if (changed
& BSS_CHANGED_CQM
) {
4244 bool enable
= false;
4245 if (bss_conf
->cqm_rssi_thold
)
4247 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4248 bss_conf
->cqm_rssi_thold
,
4249 bss_conf
->cqm_rssi_hyst
);
4252 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4255 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4256 BSS_CHANGED_ASSOC
)) {
4258 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4260 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4262 /* save the supp_rates of the ap */
4263 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4264 if (sta
->ht_cap
.ht_supported
)
4266 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4267 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4268 sta_ht_cap
= sta
->ht_cap
;
4275 if (changed
& BSS_CHANGED_BSSID
) {
4276 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4277 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4282 /* Need to update the BSSID (for filtering etc) */
4285 ret
= wlcore_clear_bssid(wl
, wlvif
);
4291 if (changed
& BSS_CHANGED_IBSS
) {
4292 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4293 bss_conf
->ibss_joined
);
4295 if (bss_conf
->ibss_joined
) {
4296 u32 rates
= bss_conf
->basic_rates
;
4297 wlvif
->basic_rate_set
=
4298 wl1271_tx_enabled_rates_get(wl
, rates
,
4301 wl1271_tx_min_rate_get(wl
,
4302 wlvif
->basic_rate_set
);
4304 /* by default, use 11b + OFDM rates */
4305 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4306 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4312 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4317 ret
= wlcore_join(wl
, wlvif
);
4319 wl1271_warning("cmd join failed %d", ret
);
4324 if (changed
& BSS_CHANGED_ASSOC
) {
4325 if (bss_conf
->assoc
) {
4326 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4331 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4332 wl12xx_set_authorized(wl
, wlvif
);
4334 wlcore_unset_assoc(wl
, wlvif
);
4338 if (changed
& BSS_CHANGED_PS
) {
4339 if ((bss_conf
->ps
) &&
4340 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4341 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4345 if (wl
->conf
.conn
.forced_ps
) {
4346 ps_mode
= STATION_POWER_SAVE_MODE
;
4347 ps_mode_str
= "forced";
4349 ps_mode
= STATION_AUTO_PS_MODE
;
4350 ps_mode_str
= "auto";
4353 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4355 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4357 wl1271_warning("enter %s ps failed %d",
4359 } else if (!bss_conf
->ps
&&
4360 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4361 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4363 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4364 STATION_ACTIVE_MODE
);
4366 wl1271_warning("exit auto ps failed %d", ret
);
4370 /* Handle new association with HT. Do this after join. */
4373 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4375 ret
= wlcore_hw_set_peer_cap(wl
,
4381 wl1271_warning("Set ht cap failed %d", ret
);
4387 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4388 bss_conf
->ht_operation_mode
);
4390 wl1271_warning("Set ht information failed %d",
4397 /* Handle arp filtering. Done after join. */
4398 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4399 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4400 __be32 addr
= bss_conf
->arp_addr_list
[0];
4401 wlvif
->sta
.qos
= bss_conf
->qos
;
4402 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4404 if (bss_conf
->arp_addr_cnt
== 1 && bss_conf
->assoc
) {
4405 wlvif
->ip_addr
= addr
;
4407 * The template should have been configured only upon
4408 * association. however, it seems that the correct ip
4409 * isn't being set (when sending), so we have to
4410 * reconfigure the template upon every ip change.
4412 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4414 wl1271_warning("build arp rsp failed: %d", ret
);
4418 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4419 (ACX_ARP_FILTER_ARP_FILTERING
|
4420 ACX_ARP_FILTER_AUTO_ARP
),
4424 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4435 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4436 struct ieee80211_vif
*vif
,
4437 struct ieee80211_bss_conf
*bss_conf
,
4440 struct wl1271
*wl
= hw
->priv
;
4441 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4442 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4445 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4446 wlvif
->role_id
, (int)changed
);
4449 * make sure to cancel pending disconnections if our association
4452 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4453 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4455 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4456 !bss_conf
->enable_beacon
)
4457 wl1271_tx_flush(wl
);
4459 mutex_lock(&wl
->mutex
);
4461 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4464 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4467 ret
= wl1271_ps_elp_wakeup(wl
);
4471 if ((changed
& BSS_CHANGED_TXPOWER
) &&
4472 bss_conf
->txpower
!= wlvif
->power_level
) {
4474 ret
= wl1271_acx_tx_power(wl
, wlvif
, bss_conf
->txpower
);
4478 wlvif
->power_level
= bss_conf
->txpower
;
4482 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4484 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4486 wl1271_ps_elp_sleep(wl
);
4489 mutex_unlock(&wl
->mutex
);
4492 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4493 struct ieee80211_chanctx_conf
*ctx
)
4495 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4496 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4497 cfg80211_get_chandef_type(&ctx
->def
));
4501 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4502 struct ieee80211_chanctx_conf
*ctx
)
4504 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4505 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4506 cfg80211_get_chandef_type(&ctx
->def
));
4509 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4510 struct ieee80211_chanctx_conf
*ctx
,
4513 wl1271_debug(DEBUG_MAC80211
,
4514 "mac80211 change chanctx %d (type %d) changed 0x%x",
4515 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4516 cfg80211_get_chandef_type(&ctx
->def
), changed
);
4519 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4520 struct ieee80211_vif
*vif
,
4521 struct ieee80211_chanctx_conf
*ctx
)
4523 struct wl1271
*wl
= hw
->priv
;
4524 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4525 int channel
= ieee80211_frequency_to_channel(
4526 ctx
->def
.chan
->center_freq
);
4528 wl1271_debug(DEBUG_MAC80211
,
4529 "mac80211 assign chanctx (role %d) %d (type %d)",
4530 wlvif
->role_id
, channel
, cfg80211_get_chandef_type(&ctx
->def
));
4532 mutex_lock(&wl
->mutex
);
4534 wlvif
->band
= ctx
->def
.chan
->band
;
4535 wlvif
->channel
= channel
;
4536 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4538 /* update default rates according to the band */
4539 wl1271_set_band_rate(wl
, wlvif
);
4541 mutex_unlock(&wl
->mutex
);
4546 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4547 struct ieee80211_vif
*vif
,
4548 struct ieee80211_chanctx_conf
*ctx
)
4550 struct wl1271
*wl
= hw
->priv
;
4551 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4553 wl1271_debug(DEBUG_MAC80211
,
4554 "mac80211 unassign chanctx (role %d) %d (type %d)",
4556 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4557 cfg80211_get_chandef_type(&ctx
->def
));
4559 wl1271_tx_flush(wl
);
4562 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4563 struct ieee80211_vif
*vif
, u16 queue
,
4564 const struct ieee80211_tx_queue_params
*params
)
4566 struct wl1271
*wl
= hw
->priv
;
4567 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4571 mutex_lock(&wl
->mutex
);
4573 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4576 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4578 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4580 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4583 ret
= wl1271_ps_elp_wakeup(wl
);
4588 * the txop is confed in units of 32us by the mac80211,
4591 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4592 params
->cw_min
, params
->cw_max
,
4593 params
->aifs
, params
->txop
<< 5);
4597 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4598 CONF_CHANNEL_TYPE_EDCF
,
4599 wl1271_tx_get_queue(queue
),
4600 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4604 wl1271_ps_elp_sleep(wl
);
4607 mutex_unlock(&wl
->mutex
);
4612 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4613 struct ieee80211_vif
*vif
)
4616 struct wl1271
*wl
= hw
->priv
;
4617 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4618 u64 mactime
= ULLONG_MAX
;
4621 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4623 mutex_lock(&wl
->mutex
);
4625 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4628 ret
= wl1271_ps_elp_wakeup(wl
);
4632 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4637 wl1271_ps_elp_sleep(wl
);
4640 mutex_unlock(&wl
->mutex
);
4644 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4645 struct survey_info
*survey
)
4647 struct ieee80211_conf
*conf
= &hw
->conf
;
4652 survey
->channel
= conf
->chandef
.chan
;
4657 static int wl1271_allocate_sta(struct wl1271
*wl
,
4658 struct wl12xx_vif
*wlvif
,
4659 struct ieee80211_sta
*sta
)
4661 struct wl1271_station
*wl_sta
;
4665 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4666 wl1271_warning("could not allocate HLID - too much stations");
4670 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4671 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4673 wl1271_warning("could not allocate HLID - too many links");
4677 /* use the previous security seq, if this is a recovery/resume */
4678 wl
->links
[wl_sta
->hlid
].total_freed_pkts
= wl_sta
->total_freed_pkts
;
4680 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4681 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4682 wl
->active_sta_count
++;
4686 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4688 struct wl1271_station
*wl_sta
;
4689 struct ieee80211_sta
*sta
;
4690 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4692 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4695 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4696 __clear_bit(hlid
, &wl
->ap_ps_map
);
4697 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4700 * save the last used PN in the private part of iee80211_sta,
4701 * in case of recovery/suspend
4704 sta
= ieee80211_find_sta(vif
, wl
->links
[hlid
].addr
);
4706 wl_sta
= (void *)sta
->drv_priv
;
4707 wl_sta
->total_freed_pkts
= wl
->links
[hlid
].total_freed_pkts
;
4710 * increment the initial seq number on recovery to account for
4711 * transmitted packets that we haven't yet got in the FW status
4713 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
4714 wl_sta
->total_freed_pkts
+=
4715 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
4719 wl12xx_free_link(wl
, wlvif
, &hlid
);
4720 wl
->active_sta_count
--;
4723 * rearm the tx watchdog when the last STA is freed - give the FW a
4724 * chance to return STA-buffered packets before complaining.
4726 if (wl
->active_sta_count
== 0)
4727 wl12xx_rearm_tx_watchdog_locked(wl
);
4730 static int wl12xx_sta_add(struct wl1271
*wl
,
4731 struct wl12xx_vif
*wlvif
,
4732 struct ieee80211_sta
*sta
)
4734 struct wl1271_station
*wl_sta
;
4738 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4740 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4744 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4745 hlid
= wl_sta
->hlid
;
4747 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4749 wl1271_free_sta(wl
, wlvif
, hlid
);
4754 static int wl12xx_sta_remove(struct wl1271
*wl
,
4755 struct wl12xx_vif
*wlvif
,
4756 struct ieee80211_sta
*sta
)
4758 struct wl1271_station
*wl_sta
;
4761 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4763 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4765 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4768 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4772 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4776 static void wlcore_roc_if_possible(struct wl1271
*wl
,
4777 struct wl12xx_vif
*wlvif
)
4779 if (find_first_bit(wl
->roc_map
,
4780 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
4783 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
4786 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
4790 * when wl_sta is NULL, we treat this call as if coming from a
4791 * pending auth reply.
4792 * wl->mutex must be taken and the FW must be awake when the call
4795 void wlcore_update_inconn_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4796 struct wl1271_station
*wl_sta
, bool in_conn
)
4799 if (WARN_ON(wl_sta
&& wl_sta
->in_connection
))
4802 if (!wlvif
->ap_pending_auth_reply
&&
4803 !wlvif
->inconn_count
)
4804 wlcore_roc_if_possible(wl
, wlvif
);
4807 wl_sta
->in_connection
= true;
4808 wlvif
->inconn_count
++;
4810 wlvif
->ap_pending_auth_reply
= true;
4813 if (wl_sta
&& !wl_sta
->in_connection
)
4816 if (WARN_ON(!wl_sta
&& !wlvif
->ap_pending_auth_reply
))
4819 if (WARN_ON(wl_sta
&& !wlvif
->inconn_count
))
4823 wl_sta
->in_connection
= false;
4824 wlvif
->inconn_count
--;
4826 wlvif
->ap_pending_auth_reply
= false;
4829 if (!wlvif
->inconn_count
&& !wlvif
->ap_pending_auth_reply
&&
4830 test_bit(wlvif
->role_id
, wl
->roc_map
))
4831 wl12xx_croc(wl
, wlvif
->role_id
);
4835 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4836 struct wl12xx_vif
*wlvif
,
4837 struct ieee80211_sta
*sta
,
4838 enum ieee80211_sta_state old_state
,
4839 enum ieee80211_sta_state new_state
)
4841 struct wl1271_station
*wl_sta
;
4842 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4843 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4846 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4848 /* Add station (AP mode) */
4850 old_state
== IEEE80211_STA_NOTEXIST
&&
4851 new_state
== IEEE80211_STA_NONE
) {
4852 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
4856 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
4859 /* Remove station (AP mode) */
4861 old_state
== IEEE80211_STA_NONE
&&
4862 new_state
== IEEE80211_STA_NOTEXIST
) {
4864 wl12xx_sta_remove(wl
, wlvif
, sta
);
4866 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4869 /* Authorize station (AP mode) */
4871 new_state
== IEEE80211_STA_AUTHORIZED
) {
4872 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wl_sta
->hlid
);
4876 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4881 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4884 /* Authorize station */
4886 new_state
== IEEE80211_STA_AUTHORIZED
) {
4887 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4888 ret
= wl12xx_set_authorized(wl
, wlvif
);
4894 old_state
== IEEE80211_STA_AUTHORIZED
&&
4895 new_state
== IEEE80211_STA_ASSOC
) {
4896 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4897 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
4900 /* clear ROCs on failure or authorization */
4902 (new_state
== IEEE80211_STA_AUTHORIZED
||
4903 new_state
== IEEE80211_STA_NOTEXIST
)) {
4904 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4905 wl12xx_croc(wl
, wlvif
->role_id
);
4909 old_state
== IEEE80211_STA_NOTEXIST
&&
4910 new_state
== IEEE80211_STA_NONE
) {
4911 if (find_first_bit(wl
->roc_map
,
4912 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
4913 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
4914 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
4915 wlvif
->band
, wlvif
->channel
);
4921 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4922 struct ieee80211_vif
*vif
,
4923 struct ieee80211_sta
*sta
,
4924 enum ieee80211_sta_state old_state
,
4925 enum ieee80211_sta_state new_state
)
4927 struct wl1271
*wl
= hw
->priv
;
4928 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4931 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4932 sta
->aid
, old_state
, new_state
);
4934 mutex_lock(&wl
->mutex
);
4936 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4941 ret
= wl1271_ps_elp_wakeup(wl
);
4945 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4947 wl1271_ps_elp_sleep(wl
);
4949 mutex_unlock(&wl
->mutex
);
4950 if (new_state
< old_state
)
4955 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4956 struct ieee80211_vif
*vif
,
4957 enum ieee80211_ampdu_mlme_action action
,
4958 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4961 struct wl1271
*wl
= hw
->priv
;
4962 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4964 u8 hlid
, *ba_bitmap
;
4966 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4969 /* sanity check - the fields in FW are only 8bits wide */
4970 if (WARN_ON(tid
> 0xFF))
4973 mutex_lock(&wl
->mutex
);
4975 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4980 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4981 hlid
= wlvif
->sta
.hlid
;
4982 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4983 struct wl1271_station
*wl_sta
;
4985 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4986 hlid
= wl_sta
->hlid
;
4992 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4994 ret
= wl1271_ps_elp_wakeup(wl
);
4998 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
5002 case IEEE80211_AMPDU_RX_START
:
5003 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
5008 if (wl
->ba_rx_session_count
>= wl
->ba_rx_session_count_max
) {
5010 wl1271_error("exceeded max RX BA sessions");
5014 if (*ba_bitmap
& BIT(tid
)) {
5016 wl1271_error("cannot enable RX BA session on active "
5021 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
5024 *ba_bitmap
|= BIT(tid
);
5025 wl
->ba_rx_session_count
++;
5029 case IEEE80211_AMPDU_RX_STOP
:
5030 if (!(*ba_bitmap
& BIT(tid
))) {
5032 * this happens on reconfig - so only output a debug
5033 * message for now, and don't fail the function.
5035 wl1271_debug(DEBUG_MAC80211
,
5036 "no active RX BA session on tid: %d",
5042 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
5045 *ba_bitmap
&= ~BIT(tid
);
5046 wl
->ba_rx_session_count
--;
5051 * The BA initiator session management in FW independently.
5052 * Falling break here on purpose for all TX APDU commands.
5054 case IEEE80211_AMPDU_TX_START
:
5055 case IEEE80211_AMPDU_TX_STOP_CONT
:
5056 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
5057 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
5058 case IEEE80211_AMPDU_TX_OPERATIONAL
:
5063 wl1271_error("Incorrect ampdu action id=%x\n", action
);
5067 wl1271_ps_elp_sleep(wl
);
5070 mutex_unlock(&wl
->mutex
);
5075 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
5076 struct ieee80211_vif
*vif
,
5077 const struct cfg80211_bitrate_mask
*mask
)
5079 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5080 struct wl1271
*wl
= hw
->priv
;
5083 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
5084 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
5085 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
5087 mutex_lock(&wl
->mutex
);
5089 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
5090 wlvif
->bitrate_masks
[i
] =
5091 wl1271_tx_enabled_rates_get(wl
,
5092 mask
->control
[i
].legacy
,
5095 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5098 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
5099 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5101 ret
= wl1271_ps_elp_wakeup(wl
);
5105 wl1271_set_band_rate(wl
, wlvif
);
5107 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
5108 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
5110 wl1271_ps_elp_sleep(wl
);
5113 mutex_unlock(&wl
->mutex
);
5118 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
5119 struct ieee80211_channel_switch
*ch_switch
)
5121 struct wl1271
*wl
= hw
->priv
;
5122 struct wl12xx_vif
*wlvif
;
5125 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
5127 wl1271_tx_flush(wl
);
5129 mutex_lock(&wl
->mutex
);
5131 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
5132 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5133 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
5134 ieee80211_chswitch_done(vif
, false);
5137 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5141 ret
= wl1271_ps_elp_wakeup(wl
);
5145 /* TODO: change mac80211 to pass vif as param */
5146 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5147 unsigned long delay_usec
;
5149 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
5153 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5155 /* indicate failure 5 seconds after channel switch time */
5156 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
5158 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
5159 usecs_to_jiffies(delay_usec
) +
5160 msecs_to_jiffies(5000));
5164 wl1271_ps_elp_sleep(wl
);
5167 mutex_unlock(&wl
->mutex
);
5170 static void wlcore_op_flush(struct ieee80211_hw
*hw
, u32 queues
, bool drop
)
5172 struct wl1271
*wl
= hw
->priv
;
5174 wl1271_tx_flush(wl
);
5177 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
5178 struct ieee80211_vif
*vif
,
5179 struct ieee80211_channel
*chan
,
5181 enum ieee80211_roc_type type
)
5183 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5184 struct wl1271
*wl
= hw
->priv
;
5185 int channel
, ret
= 0;
5187 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
5189 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
5190 channel
, wlvif
->role_id
);
5192 mutex_lock(&wl
->mutex
);
5194 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5197 /* return EBUSY if we can't ROC right now */
5198 if (WARN_ON(wl
->roc_vif
||
5199 find_first_bit(wl
->roc_map
,
5200 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)) {
5205 ret
= wl1271_ps_elp_wakeup(wl
);
5209 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
5214 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
5215 msecs_to_jiffies(duration
));
5217 wl1271_ps_elp_sleep(wl
);
5219 mutex_unlock(&wl
->mutex
);
5223 static int __wlcore_roc_completed(struct wl1271
*wl
)
5225 struct wl12xx_vif
*wlvif
;
5228 /* already completed */
5229 if (unlikely(!wl
->roc_vif
))
5232 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5234 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5237 ret
= wl12xx_stop_dev(wl
, wlvif
);
5246 static int wlcore_roc_completed(struct wl1271
*wl
)
5250 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5252 mutex_lock(&wl
->mutex
);
5254 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5259 ret
= wl1271_ps_elp_wakeup(wl
);
5263 ret
= __wlcore_roc_completed(wl
);
5265 wl1271_ps_elp_sleep(wl
);
5267 mutex_unlock(&wl
->mutex
);
5272 static void wlcore_roc_complete_work(struct work_struct
*work
)
5274 struct delayed_work
*dwork
;
5278 dwork
= container_of(work
, struct delayed_work
, work
);
5279 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5281 ret
= wlcore_roc_completed(wl
);
5283 ieee80211_remain_on_channel_expired(wl
->hw
);
5286 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5288 struct wl1271
*wl
= hw
->priv
;
5290 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5293 wl1271_tx_flush(wl
);
5296 * we can't just flush_work here, because it might deadlock
5297 * (as we might get called from the same workqueue)
5299 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5300 wlcore_roc_completed(wl
);
5305 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5306 struct ieee80211_vif
*vif
,
5307 struct ieee80211_sta
*sta
,
5310 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5311 struct wl1271
*wl
= hw
->priv
;
5313 wlcore_hw_sta_rc_update(wl
, wlvif
, sta
, changed
);
5316 static int wlcore_op_get_rssi(struct ieee80211_hw
*hw
,
5317 struct ieee80211_vif
*vif
,
5318 struct ieee80211_sta
*sta
,
5321 struct wl1271
*wl
= hw
->priv
;
5322 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5325 wl1271_debug(DEBUG_MAC80211
, "mac80211 get_rssi");
5327 mutex_lock(&wl
->mutex
);
5329 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5332 ret
= wl1271_ps_elp_wakeup(wl
);
5336 ret
= wlcore_acx_average_rssi(wl
, wlvif
, rssi_dbm
);
5341 wl1271_ps_elp_sleep(wl
);
5344 mutex_unlock(&wl
->mutex
);
5349 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5351 struct wl1271
*wl
= hw
->priv
;
5354 mutex_lock(&wl
->mutex
);
5356 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5359 /* packets are considered pending if in the TX queue or the FW */
5360 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5362 mutex_unlock(&wl
->mutex
);
5367 /* can't be const, mac80211 writes to this */
5368 static struct ieee80211_rate wl1271_rates
[] = {
5370 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5371 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5373 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5374 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5375 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5377 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5378 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5379 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5381 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5382 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5383 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5385 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5386 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5388 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5389 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5391 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5392 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5394 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5395 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5397 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5398 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5400 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5401 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5403 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5404 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5406 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5407 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5410 /* can't be const, mac80211 writes to this */
5411 static struct ieee80211_channel wl1271_channels
[] = {
5412 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5413 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5414 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5415 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5416 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5417 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5418 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5419 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5420 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5421 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5422 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5423 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5424 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5425 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5428 /* can't be const, mac80211 writes to this */
5429 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5430 .channels
= wl1271_channels
,
5431 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5432 .bitrates
= wl1271_rates
,
5433 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5436 /* 5 GHz data rates for WL1273 */
5437 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5439 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5440 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5442 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5443 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5445 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5446 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5448 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5449 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5451 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5452 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5454 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5455 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5457 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5458 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5460 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5461 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
/* 5 GHz band channels for WL1273 */
/*
 * Channel list registered with mac80211 for the 5 GHz band.
 * .hw_value is the 802.11 channel number, .center_freq the frequency in MHz.
 * Regulatory flags and per-channel power are (re)initialized at runtime in
 * wl1271_init_ieee80211(), so only the static identity is encoded here.
 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
/*
 * 5 GHz band descriptor handed to mac80211. A per-device copy is made in
 * wl1271_init_ieee80211() (ht_cap is patched per device there), so this
 * template only ties together the shared channel and bitrate tables.
 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
/*
 * mac80211 callback table for all wlcore-based chips (wl12xx/wl18xx).
 * Registered via ieee80211_alloc_hw() in wlcore_alloc_hw().
 */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	/* expands to .testmode_cmd when CONFIG_NL80211_TESTMODE is set */
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5551 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
5557 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5558 wl1271_error("Illegal RX rate from HW: %d", rate
);
5562 idx
= wl
->band_rate_to_idx
[band
][rate
];
5563 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5564 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5571 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5575 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5578 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5579 wl1271_warning("NIC part of the MAC address wraps around!");
5581 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5582 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5583 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5584 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5585 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5586 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5587 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5591 /* we may be one address short at the most */
5592 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5595 * turn on the LAA bit in the first address and use it as
5598 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5599 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5600 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5601 sizeof(wl
->addresses
[0]));
5603 wl
->addresses
[idx
].addr
[2] |= BIT(1);
5606 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5607 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5610 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5614 ret
= wl12xx_set_power_on(wl
);
5618 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5622 wl
->fuse_oui_addr
= 0;
5623 wl
->fuse_nic_addr
= 0;
5625 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5629 if (wl
->ops
->get_mac
)
5630 ret
= wl
->ops
->get_mac(wl
);
5633 wl1271_power_off(wl
);
5637 static int wl1271_register_hw(struct wl1271
*wl
)
5640 u32 oui_addr
= 0, nic_addr
= 0;
5642 if (wl
->mac80211_registered
)
5645 if (wl
->nvs_len
>= 12) {
5646 /* NOTE: The wl->nvs->nvs element must be first, in
5647 * order to simplify the casting, we assume it is at
5648 * the beginning of the wl->nvs structure.
5650 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5653 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5655 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5658 /* if the MAC address is zeroed in the NVS derive from fuse */
5659 if (oui_addr
== 0 && nic_addr
== 0) {
5660 oui_addr
= wl
->fuse_oui_addr
;
5661 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5662 nic_addr
= wl
->fuse_nic_addr
+ 1;
5665 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5667 ret
= ieee80211_register_hw(wl
->hw
);
5669 wl1271_error("unable to register mac80211 hw: %d", ret
);
5673 wl
->mac80211_registered
= true;
5675 wl1271_debugfs_init(wl
);
5677 wl1271_notice("loaded");
5683 static void wl1271_unregister_hw(struct wl1271
*wl
)
5686 wl1271_plt_stop(wl
);
5688 ieee80211_unregister_hw(wl
->hw
);
5689 wl
->mac80211_registered
= false;
5693 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5696 .types
= BIT(NL80211_IFTYPE_STATION
),
5700 .types
= BIT(NL80211_IFTYPE_AP
) |
5701 BIT(NL80211_IFTYPE_P2P_GO
) |
5702 BIT(NL80211_IFTYPE_P2P_CLIENT
),
/*
 * Allowed interface combinations. Not const: num_different_channels is
 * filled in per device in wl1271_init_ieee80211() from wl->num_channels.
 */
static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
	{
		.max_interfaces = 3,
		.limits = wlcore_iface_limits,
		.n_limits = ARRAY_SIZE(wlcore_iface_limits),
	},
};
5715 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5718 static const u32 cipher_suites
[] = {
5719 WLAN_CIPHER_SUITE_WEP40
,
5720 WLAN_CIPHER_SUITE_WEP104
,
5721 WLAN_CIPHER_SUITE_TKIP
,
5722 WLAN_CIPHER_SUITE_CCMP
,
5723 WL1271_CIPHER_SUITE_GEM
,
5726 /* The tx descriptor buffer */
5727 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5729 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5730 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5733 /* FIXME: find a proper value */
5734 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5736 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5737 IEEE80211_HW_SUPPORTS_PS
|
5738 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5739 IEEE80211_HW_SUPPORTS_UAPSD
|
5740 IEEE80211_HW_HAS_RATE_CONTROL
|
5741 IEEE80211_HW_CONNECTION_MONITOR
|
5742 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5743 IEEE80211_HW_SPECTRUM_MGMT
|
5744 IEEE80211_HW_AP_LINK_PS
|
5745 IEEE80211_HW_AMPDU_AGGREGATION
|
5746 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5747 IEEE80211_HW_QUEUE_CONTROL
;
5749 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5750 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5752 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5753 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5754 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5755 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5756 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5757 wl
->hw
->wiphy
->max_match_sets
= 16;
5759 * Maximum length of elements in scanning probe request templates
5760 * should be the maximum length possible for a template, without
5761 * the IEEE80211 header of the template
5763 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5764 sizeof(struct ieee80211_header
);
5766 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5767 sizeof(struct ieee80211_header
);
5769 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 5000;
5771 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5772 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
|
5773 WIPHY_FLAG_SUPPORTS_SCHED_SCAN
;
5775 /* make sure all our channels fit in the scanned_ch bitmask */
5776 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5777 ARRAY_SIZE(wl1271_channels_5ghz
) >
5778 WL1271_MAX_CHANNELS
);
5780 * clear channel flags from the previous usage
5781 * and restore max_power & max_antenna_gain values.
5783 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
5784 wl1271_band_2ghz
.channels
[i
].flags
= 0;
5785 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5786 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
5789 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
5790 wl1271_band_5ghz
.channels
[i
].flags
= 0;
5791 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5792 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
5796 * We keep local copies of the band structs because we need to
5797 * modify them on a per-device basis.
5799 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5800 sizeof(wl1271_band_2ghz
));
5801 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5802 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5803 sizeof(*wl
->ht_cap
));
5804 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5805 sizeof(wl1271_band_5ghz
));
5806 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5807 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5808 sizeof(*wl
->ht_cap
));
5810 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5811 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5812 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5813 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5816 * allow 4 queues per mac address we support +
5817 * 1 cab queue per mac + one global offchannel Tx queue
5819 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
5821 /* the last queue is the offchannel queue */
5822 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
5823 wl
->hw
->max_rates
= 1;
5825 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5827 /* the FW answers probe-requests in AP-mode */
5828 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5829 wl
->hw
->wiphy
->probe_resp_offload
=
5830 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5831 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5832 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5834 /* allowed interface combinations */
5835 wlcore_iface_combinations
[0].num_different_channels
= wl
->num_channels
;
5836 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5837 wl
->hw
->wiphy
->n_iface_combinations
=
5838 ARRAY_SIZE(wlcore_iface_combinations
);
5840 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5842 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5843 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5845 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5850 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
5853 struct ieee80211_hw
*hw
;
5858 BUILD_BUG_ON(AP_MAX_STATIONS
> WLCORE_MAX_LINKS
);
5860 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5862 wl1271_error("could not alloc ieee80211_hw");
5868 memset(wl
, 0, sizeof(*wl
));
5870 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5872 wl1271_error("could not alloc wl priv");
5874 goto err_priv_alloc
;
5877 INIT_LIST_HEAD(&wl
->wlvif_list
);
5882 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5883 * we don't allocate any additional resource here, so that's fine.
5885 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5886 for (j
= 0; j
< WLCORE_MAX_LINKS
; j
++)
5887 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5889 skb_queue_head_init(&wl
->deferred_rx_queue
);
5890 skb_queue_head_init(&wl
->deferred_tx_queue
);
5892 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5893 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5894 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5895 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5896 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5897 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
5898 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5900 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5901 if (!wl
->freezable_wq
) {
5908 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5909 wl
->band
= IEEE80211_BAND_2GHZ
;
5910 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5912 wl
->sg_enabled
= true;
5913 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5914 wl
->recovery_count
= 0;
5917 wl
->ap_fw_ps_map
= 0;
5919 wl
->platform_quirks
= 0;
5920 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5921 wl
->active_sta_count
= 0;
5922 wl
->active_link_count
= 0;
5924 init_waitqueue_head(&wl
->fwlog_waitq
);
5926 /* The system link is always allocated */
5927 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5929 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5930 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5931 wl
->tx_frames
[i
] = NULL
;
5933 spin_lock_init(&wl
->wl_lock
);
5935 wl
->state
= WLCORE_STATE_OFF
;
5936 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5937 mutex_init(&wl
->mutex
);
5938 mutex_init(&wl
->flush_mutex
);
5939 init_completion(&wl
->nvs_loading_complete
);
5941 order
= get_order(aggr_buf_size
);
5942 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5943 if (!wl
->aggr_buf
) {
5947 wl
->aggr_buf_size
= aggr_buf_size
;
5949 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5950 if (!wl
->dummy_packet
) {
5955 /* Allocate one page for the FW log */
5956 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5959 goto err_dummy_packet
;
5962 wl
->mbox_size
= mbox_size
;
5963 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
5969 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
5970 if (!wl
->buffer_32
) {
5981 free_page((unsigned long)wl
->fwlog
);
5984 dev_kfree_skb(wl
->dummy_packet
);
5987 free_pages((unsigned long)wl
->aggr_buf
, order
);
5990 destroy_workqueue(wl
->freezable_wq
);
5993 wl1271_debugfs_exit(wl
);
5997 ieee80211_free_hw(hw
);
6001 return ERR_PTR(ret
);
6003 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
6005 int wlcore_free_hw(struct wl1271
*wl
)
6007 /* Unblock any fwlog readers */
6008 mutex_lock(&wl
->mutex
);
6009 wl
->fwlog_size
= -1;
6010 wake_up_interruptible_all(&wl
->fwlog_waitq
);
6011 mutex_unlock(&wl
->mutex
);
6013 wlcore_sysfs_free(wl
);
6015 kfree(wl
->buffer_32
);
6017 free_page((unsigned long)wl
->fwlog
);
6018 dev_kfree_skb(wl
->dummy_packet
);
6019 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
6021 wl1271_debugfs_exit(wl
);
6025 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6029 kfree(wl
->raw_fw_status
);
6030 kfree(wl
->fw_status
);
6031 kfree(wl
->tx_res_if
);
6032 destroy_workqueue(wl
->freezable_wq
);
6035 ieee80211_free_hw(wl
->hw
);
6039 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps power in suspend (see wlcore_nvs_cb). Pattern limits match the
 * firmware RX-filter engine.
 */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
/*
 * Top-half IRQ handler used only for edge-triggered interrupt lines:
 * immediately defers all work to the threaded handler (wlcore_irq).
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6055 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
6057 struct wl1271
*wl
= context
;
6058 struct platform_device
*pdev
= wl
->pdev
;
6059 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6060 struct wl12xx_platform_data
*pdata
= pdev_data
->pdata
;
6061 unsigned long irqflags
;
6063 irq_handler_t hardirq_fn
= NULL
;
6066 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
6068 wl1271_error("Could not allocate nvs data");
6071 wl
->nvs_len
= fw
->size
;
6073 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
6079 ret
= wl
->ops
->setup(wl
);
6083 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
6085 /* adjust some runtime configuration parameters */
6086 wlcore_adjust_conf(wl
);
6088 wl
->irq
= platform_get_irq(pdev
, 0);
6089 wl
->platform_quirks
= pdata
->platform_quirks
;
6090 wl
->if_ops
= pdev_data
->if_ops
;
6092 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
) {
6093 irqflags
= IRQF_TRIGGER_RISING
;
6094 hardirq_fn
= wlcore_hardirq
;
6096 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
6099 ret
= request_threaded_irq(wl
->irq
, hardirq_fn
, wlcore_irq
,
6100 irqflags
, pdev
->name
, wl
);
6102 wl1271_error("request_irq() failed: %d", ret
);
6107 ret
= enable_irq_wake(wl
->irq
);
6109 wl
->irq_wake_enabled
= true;
6110 device_init_wakeup(wl
->dev
, 1);
6111 if (pdata
->pwr_in_suspend
)
6112 wl
->hw
->wiphy
->wowlan
= &wlcore_wowlan_support
;
6115 disable_irq(wl
->irq
);
6117 ret
= wl12xx_get_hw_info(wl
);
6119 wl1271_error("couldn't get hw info");
6123 ret
= wl
->ops
->identify_chip(wl
);
6127 ret
= wl1271_init_ieee80211(wl
);
6131 ret
= wl1271_register_hw(wl
);
6135 ret
= wlcore_sysfs_init(wl
);
6139 wl
->initialized
= true;
6143 wl1271_unregister_hw(wl
);
6146 free_irq(wl
->irq
, wl
);
6152 release_firmware(fw
);
6153 complete_all(&wl
->nvs_loading_complete
);
6156 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
6160 if (!wl
->ops
|| !wl
->ptable
)
6163 wl
->dev
= &pdev
->dev
;
6165 platform_set_drvdata(pdev
, wl
);
6167 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6168 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
6171 wl1271_error("request_firmware_nowait failed: %d", ret
);
6172 complete_all(&wl
->nvs_loading_complete
);
6177 EXPORT_SYMBOL_GPL(wlcore_probe
);
6179 int wlcore_remove(struct platform_device
*pdev
)
6181 struct wl1271
*wl
= platform_get_drvdata(pdev
);
6183 wait_for_completion(&wl
->nvs_loading_complete
);
6184 if (!wl
->initialized
)
6187 if (wl
->irq_wake_enabled
) {
6188 device_init_wakeup(wl
->dev
, 0);
6189 disable_irq_wake(wl
->irq
);
6191 wl1271_unregister_hw(wl
);
6192 free_irq(wl
->irq
, wl
);
6197 EXPORT_SYMBOL_GPL(wlcore_remove
);
/* Runtime-adjustable debug bitmask, shared with the chip drivers. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);