3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
45 #define WL1271_BOOT_RETRIES 3
47 static char *fwlog_param
;
48 static int fwlog_mem_blocks
= -1;
49 static int bug_on_recovery
= -1;
50 static int no_recovery
= -1;
52 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
53 struct ieee80211_vif
*vif
,
54 bool reset_tx_queues
);
55 static void wlcore_op_stop_locked(struct wl1271
*wl
);
56 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
58 static int wl12xx_set_authorized(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
62 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
71 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
75 wl1271_info("Association completed.");
79 static void wl1271_reg_notify(struct wiphy
*wiphy
,
80 struct regulatory_request
*request
)
82 struct ieee80211_supported_band
*band
;
83 struct ieee80211_channel
*ch
;
85 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
86 struct wl1271
*wl
= hw
->priv
;
88 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
89 for (i
= 0; i
< band
->n_channels
; i
++) {
90 ch
= &band
->channels
[i
];
91 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
94 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
95 ch
->flags
|= IEEE80211_CHAN_NO_IR
;
99 wlcore_regdomain_config(wl
);
102 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
107 /* we should hold wl->mutex */
108 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
124 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
127 int period
= wl
->conf
.rx_streaming
.interval
;
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
133 /* reconfigure/disable according to new streaming_period */
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
136 (wl
->conf
.rx_streaming
.always
||
137 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
138 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
140 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif
->rx_streaming_timer
);
148 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
151 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
152 rx_streaming_enable_work
);
153 struct wl1271
*wl
= wlvif
->wl
;
155 mutex_lock(&wl
->mutex
);
157 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
158 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
159 (!wl
->conf
.rx_streaming
.always
&&
160 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
163 if (!wl
->conf
.rx_streaming
.interval
)
166 ret
= wl1271_ps_elp_wakeup(wl
);
170 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
174 /* stop it after some time of inactivity */
175 mod_timer(&wlvif
->rx_streaming_timer
,
176 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
179 wl1271_ps_elp_sleep(wl
);
181 mutex_unlock(&wl
->mutex
);
184 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
187 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
188 rx_streaming_disable_work
);
189 struct wl1271
*wl
= wlvif
->wl
;
191 mutex_lock(&wl
->mutex
);
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
196 ret
= wl1271_ps_elp_wakeup(wl
);
200 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
205 wl1271_ps_elp_sleep(wl
);
207 mutex_unlock(&wl
->mutex
);
210 static void wl1271_rx_streaming_timer(unsigned long data
)
212 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
213 struct wl1271
*wl
= wlvif
->wl
;
214 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
220 /* if the watchdog is not armed, don't do anything */
221 if (wl
->tx_allocated_blocks
== 0)
224 cancel_delayed_work(&wl
->tx_watchdog_work
);
225 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
226 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
229 static void wlcore_rc_update_work(struct work_struct
*work
)
232 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
234 struct wl1271
*wl
= wlvif
->wl
;
236 mutex_lock(&wl
->mutex
);
238 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
241 ret
= wl1271_ps_elp_wakeup(wl
);
245 wlcore_hw_sta_rc_update(wl
, wlvif
);
247 wl1271_ps_elp_sleep(wl
);
249 mutex_unlock(&wl
->mutex
);
252 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
254 struct delayed_work
*dwork
;
257 dwork
= container_of(work
, struct delayed_work
, work
);
258 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
260 mutex_lock(&wl
->mutex
);
262 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
265 /* Tx went out in the meantime - everything is ok */
266 if (unlikely(wl
->tx_allocated_blocks
== 0))
270 * if a ROC is in progress, we might not have any Tx for a long
271 * time (e.g. pending Tx on the non-ROC channels)
273 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * if a scan is in progress, we might not have any Tx for a long
284 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
285 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
286 wl
->conf
.tx
.tx_watchdog_timeout
);
287 wl12xx_rearm_tx_watchdog_locked(wl
);
292 * AP might cache a frame for a long time for a sleeping station,
293 * so rearm the timer if there's an AP interface with stations. If
294 * Tx is genuinely stuck we will most hopefully discover it when all
295 * stations are removed due to inactivity.
297 if (wl
->active_sta_count
) {
298 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
300 wl
->conf
.tx
.tx_watchdog_timeout
,
301 wl
->active_sta_count
);
302 wl12xx_rearm_tx_watchdog_locked(wl
);
306 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
307 wl
->conf
.tx
.tx_watchdog_timeout
);
308 wl12xx_queue_recovery_work(wl
);
311 mutex_unlock(&wl
->mutex
);
314 static void wlcore_adjust_conf(struct wl1271
*wl
)
316 /* Adjust settings according to optional module parameters */
318 /* Firmware Logger params */
319 if (fwlog_mem_blocks
!= -1) {
320 if (fwlog_mem_blocks
>= CONF_FWLOG_MIN_MEM_BLOCKS
&&
321 fwlog_mem_blocks
<= CONF_FWLOG_MAX_MEM_BLOCKS
) {
322 wl
->conf
.fwlog
.mem_blocks
= fwlog_mem_blocks
;
325 "Illegal fwlog_mem_blocks=%d using default %d",
326 fwlog_mem_blocks
, wl
->conf
.fwlog
.mem_blocks
);
331 if (!strcmp(fwlog_param
, "continuous")) {
332 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
333 } else if (!strcmp(fwlog_param
, "ondemand")) {
334 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
335 } else if (!strcmp(fwlog_param
, "dbgpins")) {
336 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
337 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
338 } else if (!strcmp(fwlog_param
, "disable")) {
339 wl
->conf
.fwlog
.mem_blocks
= 0;
340 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
342 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
346 if (bug_on_recovery
!= -1)
347 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
349 if (no_recovery
!= -1)
350 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
353 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
354 struct wl12xx_vif
*wlvif
,
359 fw_ps
= test_bit(hlid
, &wl
->ap_fw_ps_map
);
362 * Wake up from high level PS if the STA is asleep with too little
363 * packets in FW or if the STA is awake.
365 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
366 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
369 * Start high-level PS if the STA is asleep with enough blocks in FW.
370 * Make an exception if this is the only connected link. In this
371 * case FW-memory congestion is less of a problem.
372 * Note that a single connected STA means 2*ap_count + 1 active links,
373 * since we must account for the global and broadcast AP links
374 * for each AP. The "fw_ps" check assures us the other link is a STA
375 * connected to the AP. Otherwise the FW would not set the PSM bit.
377 else if (wl
->active_link_count
> (wl
->ap_count
*2 + 1) && fw_ps
&&
378 tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
379 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
382 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
383 struct wl12xx_vif
*wlvif
,
384 struct wl_fw_status
*status
)
386 unsigned long cur_fw_ps_map
;
389 cur_fw_ps_map
= status
->link_ps_bitmap
;
390 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
391 wl1271_debug(DEBUG_PSM
,
392 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
393 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
394 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
396 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
399 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, wl
->num_links
)
400 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
401 wl
->links
[hlid
].allocated_pkts
);
404 static int wlcore_fw_status(struct wl1271
*wl
, struct wl_fw_status
*status
)
406 struct wl12xx_vif
*wlvif
;
408 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
409 int avail
, freed_blocks
;
412 struct wl1271_link
*lnk
;
414 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
,
416 wl
->fw_status_len
, false);
420 wlcore_hw_convert_fw_status(wl
, wl
->raw_fw_status
, wl
->fw_status
);
422 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
423 "drv_rx_counter = %d, tx_results_counter = %d)",
425 status
->fw_rx_counter
,
426 status
->drv_rx_counter
,
427 status
->tx_results_counter
);
429 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
430 /* prevent wrap-around in freed-packets counter */
431 wl
->tx_allocated_pkts
[i
] -=
432 (status
->counters
.tx_released_pkts
[i
] -
433 wl
->tx_pkts_freed
[i
]) & 0xff;
435 wl
->tx_pkts_freed
[i
] = status
->counters
.tx_released_pkts
[i
];
439 for_each_set_bit(i
, wl
->links_map
, wl
->num_links
) {
443 /* prevent wrap-around in freed-packets counter */
444 diff
= (status
->counters
.tx_lnk_free_pkts
[i
] -
445 lnk
->prev_freed_pkts
) & 0xff;
450 lnk
->allocated_pkts
-= diff
;
451 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[i
];
453 /* accumulate the prev_freed_pkts counter */
454 lnk
->total_freed_pkts
+= diff
;
457 /* prevent wrap-around in total blocks counter */
458 if (likely(wl
->tx_blocks_freed
<= status
->total_released_blks
))
459 freed_blocks
= status
->total_released_blks
-
462 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
463 status
->total_released_blks
;
465 wl
->tx_blocks_freed
= status
->total_released_blks
;
467 wl
->tx_allocated_blocks
-= freed_blocks
;
470 * If the FW freed some blocks:
471 * If we still have allocated blocks - re-arm the timer, Tx is
472 * not stuck. Otherwise, cancel the timer (no Tx currently).
475 if (wl
->tx_allocated_blocks
)
476 wl12xx_rearm_tx_watchdog_locked(wl
);
478 cancel_delayed_work(&wl
->tx_watchdog_work
);
481 avail
= status
->tx_total
- wl
->tx_allocated_blocks
;
484 * The FW might change the total number of TX memblocks before
485 * we get a notification about blocks being released. Thus, the
486 * available blocks calculation might yield a temporary result
487 * which is lower than the actual available blocks. Keeping in
488 * mind that only blocks that were allocated can be moved from
489 * TX to RX, tx_blocks_available should never decrease here.
491 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
494 /* if more blocks are available now, tx work can be scheduled */
495 if (wl
->tx_blocks_available
> old_tx_blk_count
)
496 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
498 /* for AP update num of allocated TX blocks per link and ps status */
499 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
500 wl12xx_irq_update_links_status(wl
, wlvif
, status
);
503 /* update the host-chipset time offset */
505 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
506 (s64
)(status
->fw_localtime
);
508 wl
->fw_fast_lnk_map
= status
->link_fast_bitmap
;
513 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
517 /* Pass all received frames to the network stack */
518 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
519 ieee80211_rx_ni(wl
->hw
, skb
);
521 /* Return sent skbs to the network stack */
522 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
523 ieee80211_tx_status_ni(wl
->hw
, skb
);
526 static void wl1271_netstack_work(struct work_struct
*work
)
529 container_of(work
, struct wl1271
, netstack_work
);
532 wl1271_flush_deferred_work(wl
);
533 } while (skb_queue_len(&wl
->deferred_rx_queue
));
536 #define WL1271_IRQ_MAX_LOOPS 256
538 static int wlcore_irq_locked(struct wl1271
*wl
)
542 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
544 unsigned int defer_count
;
548 * In case edge triggered interrupt must be used, we cannot iterate
549 * more than once without introducing race conditions with the hardirq.
551 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
554 wl1271_debug(DEBUG_IRQ
, "IRQ work");
556 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
559 ret
= wl1271_ps_elp_wakeup(wl
);
563 while (!done
&& loopcount
--) {
565 * In order to avoid a race with the hardirq, clear the flag
566 * before acknowledging the chip. Since the mutex is held,
567 * wl1271_ps_elp_wakeup cannot be called concurrently.
569 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
570 smp_mb__after_atomic();
572 ret
= wlcore_fw_status(wl
, wl
->fw_status
);
576 wlcore_hw_tx_immediate_compl(wl
);
578 intr
= wl
->fw_status
->intr
;
579 intr
&= WLCORE_ALL_INTR_MASK
;
585 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
586 wl1271_error("HW watchdog interrupt received! starting recovery.");
587 wl
->watchdog_recovery
= true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
595 wl1271_error("SW watchdog interrupt received! "
596 "starting recovery.");
597 wl
->watchdog_recovery
= true;
600 /* restarting the chip. ignore any other interrupt. */
604 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
605 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
607 ret
= wlcore_rx(wl
, wl
->fw_status
);
611 /* Check if any tx blocks were freed */
612 spin_lock_irqsave(&wl
->wl_lock
, flags
);
613 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
614 wl1271_tx_total_queue_count(wl
) > 0) {
615 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
617 * In order to avoid starvation of the TX path,
618 * call the work function directly.
620 ret
= wlcore_tx_work_locked(wl
);
624 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
627 /* check for tx results */
628 ret
= wlcore_hw_tx_delayed_compl(wl
);
632 /* Make sure the deferred queues don't get too long */
633 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
634 skb_queue_len(&wl
->deferred_rx_queue
);
635 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
636 wl1271_flush_deferred_work(wl
);
639 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
640 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
641 ret
= wl1271_event_handle(wl
, 0);
646 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
647 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
648 ret
= wl1271_event_handle(wl
, 1);
653 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
654 wl1271_debug(DEBUG_IRQ
,
655 "WL1271_ACX_INTR_INIT_COMPLETE");
657 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
658 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
661 wl1271_ps_elp_sleep(wl
);
667 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
671 struct wl1271
*wl
= cookie
;
673 /* complete the ELP completion */
674 spin_lock_irqsave(&wl
->wl_lock
, flags
);
675 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
677 complete(wl
->elp_compl
);
678 wl
->elp_compl
= NULL
;
681 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
682 /* don't enqueue a work right now. mark it as pending */
683 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
684 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
685 disable_irq_nosync(wl
->irq
);
686 pm_wakeup_event(wl
->dev
, 0);
687 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
690 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
692 /* TX might be handled here, avoid redundant work */
693 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
694 cancel_work_sync(&wl
->tx_work
);
696 mutex_lock(&wl
->mutex
);
698 ret
= wlcore_irq_locked(wl
);
700 wl12xx_queue_recovery_work(wl
);
702 spin_lock_irqsave(&wl
->wl_lock
, flags
);
703 /* In case TX was not handled here, queue TX work */
704 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
705 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
706 wl1271_tx_total_queue_count(wl
) > 0)
707 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
708 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
710 mutex_unlock(&wl
->mutex
);
715 struct vif_counter_data
{
718 struct ieee80211_vif
*cur_vif
;
719 bool cur_vif_running
;
722 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
723 struct ieee80211_vif
*vif
)
725 struct vif_counter_data
*counter
= data
;
728 if (counter
->cur_vif
== vif
)
729 counter
->cur_vif_running
= true;
732 /* caller must not hold wl->mutex, as it might deadlock */
733 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
734 struct ieee80211_vif
*cur_vif
,
735 struct vif_counter_data
*data
)
737 memset(data
, 0, sizeof(*data
));
738 data
->cur_vif
= cur_vif
;
740 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
741 wl12xx_vif_count_iter
, data
);
744 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
746 const struct firmware
*fw
;
748 enum wl12xx_fw_type fw_type
;
752 fw_type
= WL12XX_FW_TYPE_PLT
;
753 fw_name
= wl
->plt_fw_name
;
756 * we can't call wl12xx_get_vif_count() here because
757 * wl->mutex is taken, so use the cached last_vif_count value
759 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
760 fw_type
= WL12XX_FW_TYPE_MULTI
;
761 fw_name
= wl
->mr_fw_name
;
763 fw_type
= WL12XX_FW_TYPE_NORMAL
;
764 fw_name
= wl
->sr_fw_name
;
768 if (wl
->fw_type
== fw_type
)
771 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
773 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
776 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
781 wl1271_error("firmware size is not multiple of 32 bits: %zu",
788 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
789 wl
->fw_len
= fw
->size
;
790 wl
->fw
= vmalloc(wl
->fw_len
);
793 wl1271_error("could not allocate memory for the firmware");
798 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
800 wl
->fw_type
= fw_type
;
802 release_firmware(fw
);
807 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
809 /* Avoid a recursive recovery */
810 if (wl
->state
== WLCORE_STATE_ON
) {
811 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
,
814 wl
->state
= WLCORE_STATE_RESTARTING
;
815 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
816 wl1271_ps_elp_wakeup(wl
);
817 wlcore_disable_interrupts_nosync(wl
);
818 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
822 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
826 /* Make sure we have enough room */
827 len
= min_t(size_t, maxlen
, PAGE_SIZE
- wl
->fwlog_size
);
829 /* Fill the FW log file, consumed by the sysfs fwlog entry */
830 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
831 wl
->fwlog_size
+= len
;
836 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
838 struct wlcore_partition_set part
, old_part
;
845 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
846 (wl
->conf
.fwlog
.mem_blocks
== 0))
849 wl1271_info("Reading FW panic log");
851 block
= kmalloc(wl
->fw_mem_block_size
, GFP_KERNEL
);
856 * Make sure the chip is awake and the logger isn't active.
857 * Do not send a stop fwlog command if the fw is hanged or if
858 * dbgpins are used (due to some fw bug).
860 if (wl1271_ps_elp_wakeup(wl
))
862 if (!wl
->watchdog_recovery
&&
863 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
864 wl12xx_cmd_stop_fwlog(wl
);
866 /* Read the first memory block address */
867 ret
= wlcore_fw_status(wl
, wl
->fw_status
);
871 addr
= wl
->fw_status
->log_start_addr
;
875 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
876 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
877 end_of_log
= wl
->fwlog_end
;
879 offset
= sizeof(addr
);
883 old_part
= wl
->curr_part
;
884 memset(&part
, 0, sizeof(part
));
886 /* Traverse the memory blocks linked list */
888 part
.mem
.start
= wlcore_hw_convert_hwaddr(wl
, addr
);
889 part
.mem
.size
= PAGE_SIZE
;
891 ret
= wlcore_set_partition(wl
, &part
);
893 wl1271_error("%s: set_partition start=0x%X size=%d",
894 __func__
, part
.mem
.start
, part
.mem
.size
);
898 memset(block
, 0, wl
->fw_mem_block_size
);
899 ret
= wlcore_read_hwaddr(wl
, addr
, block
,
900 wl
->fw_mem_block_size
, false);
906 * Memory blocks are linked to one another. The first 4 bytes
907 * of each memory block hold the hardware address of the next
908 * one. The last memory block points to the first one in
909 * on demand mode and is equal to 0x2000000 in continuous mode.
911 addr
= le32_to_cpup((__le32
*)block
);
913 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
914 wl
->fw_mem_block_size
- offset
))
916 } while (addr
&& (addr
!= end_of_log
));
918 wake_up_interruptible(&wl
->fwlog_waitq
);
922 wlcore_set_partition(wl
, &old_part
);
925 static void wlcore_save_freed_pkts(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
926 u8 hlid
, struct ieee80211_sta
*sta
)
928 struct wl1271_station
*wl_sta
;
929 u32 sqn_recovery_padding
= WL1271_TX_SQN_POST_RECOVERY_PADDING
;
931 wl_sta
= (void *)sta
->drv_priv
;
932 wl_sta
->total_freed_pkts
= wl
->links
[hlid
].total_freed_pkts
;
935 * increment the initial seq number on recovery to account for
936 * transmitted packets that we haven't yet got in the FW status
938 if (wlvif
->encryption_type
== KEY_GEM
)
939 sqn_recovery_padding
= WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM
;
941 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
942 wl_sta
->total_freed_pkts
+= sqn_recovery_padding
;
945 static void wlcore_save_freed_pkts_addr(struct wl1271
*wl
,
946 struct wl12xx_vif
*wlvif
,
947 u8 hlid
, const u8
*addr
)
949 struct ieee80211_sta
*sta
;
950 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
952 if (WARN_ON(hlid
== WL12XX_INVALID_LINK_ID
||
953 is_zero_ether_addr(addr
)))
957 sta
= ieee80211_find_sta(vif
, addr
);
959 wlcore_save_freed_pkts(wl
, wlvif
, hlid
, sta
);
963 static void wlcore_print_recovery(struct wl1271
*wl
)
969 wl1271_info("Hardware recovery in progress. FW ver: %s",
970 wl
->chip
.fw_ver_str
);
972 /* change partitions momentarily so we can read the FW pc */
973 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
977 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
981 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
985 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
986 pc
, hint_sts
, ++wl
->recovery_count
);
988 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
992 static void wl1271_recovery_work(struct work_struct
*work
)
995 container_of(work
, struct wl1271
, recovery_work
);
996 struct wl12xx_vif
*wlvif
;
997 struct ieee80211_vif
*vif
;
999 mutex_lock(&wl
->mutex
);
1001 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
1004 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
1005 if (wl
->conf
.fwlog
.output
== WL12XX_FWLOG_OUTPUT_HOST
)
1006 wl12xx_read_fwlog_panic(wl
);
1007 wlcore_print_recovery(wl
);
1010 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
1011 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
1013 if (wl
->conf
.recovery
.no_recovery
) {
1014 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1018 /* Prevent spurious TX during FW restart */
1019 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
1021 /* reboot the chipset */
1022 while (!list_empty(&wl
->wlvif_list
)) {
1023 wlvif
= list_first_entry(&wl
->wlvif_list
,
1024 struct wl12xx_vif
, list
);
1025 vif
= wl12xx_wlvif_to_vif(wlvif
);
1027 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
1028 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
1029 wlcore_save_freed_pkts_addr(wl
, wlvif
, wlvif
->sta
.hlid
,
1030 vif
->bss_conf
.bssid
);
1033 __wl1271_op_remove_interface(wl
, vif
, false);
1036 wlcore_op_stop_locked(wl
);
1038 ieee80211_restart_hw(wl
->hw
);
1041 * Its safe to enable TX now - the queues are stopped after a request
1042 * to restart the HW.
1044 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
1047 wl
->watchdog_recovery
= false;
1048 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
1049 mutex_unlock(&wl
->mutex
);
1052 static int wlcore_fw_wakeup(struct wl1271
*wl
)
1054 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
1057 static int wl1271_setup(struct wl1271
*wl
)
1059 wl
->raw_fw_status
= kzalloc(wl
->fw_status_len
, GFP_KERNEL
);
1060 if (!wl
->raw_fw_status
)
1063 wl
->fw_status
= kzalloc(sizeof(*wl
->fw_status
), GFP_KERNEL
);
1067 wl
->tx_res_if
= kzalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
1073 kfree(wl
->fw_status
);
1074 kfree(wl
->raw_fw_status
);
1078 static int wl12xx_set_power_on(struct wl1271
*wl
)
1082 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1083 ret
= wl1271_power_on(wl
);
1086 msleep(WL1271_POWER_ON_SLEEP
);
1087 wl1271_io_reset(wl
);
1090 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1094 /* ELP module wake up */
1095 ret
= wlcore_fw_wakeup(wl
);
1103 wl1271_power_off(wl
);
1107 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1111 ret
= wl12xx_set_power_on(wl
);
1116 * For wl127x based devices we could use the default block
1117 * size (512 bytes), but due to a bug in the sdio driver, we
1118 * need to set it explicitly after the chip is powered on. To
1119 * simplify the code and since the performance impact is
1120 * negligible, we use the same block size for all different
1123 * Check if the bus supports blocksize alignment and, if it
1124 * doesn't, make sure we don't have the quirk.
1126 if (!wl1271_set_block_size(wl
))
1127 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1129 /* TODO: make sure the lower driver has set things up correctly */
1131 ret
= wl1271_setup(wl
);
1135 ret
= wl12xx_fetch_firmware(wl
, plt
);
1143 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1145 int retries
= WL1271_BOOT_RETRIES
;
1146 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1148 static const char* const PLT_MODE
[] = {
1157 mutex_lock(&wl
->mutex
);
1159 wl1271_notice("power up");
1161 if (wl
->state
!= WLCORE_STATE_OFF
) {
1162 wl1271_error("cannot go into PLT state because not "
1163 "in off state: %d", wl
->state
);
1168 /* Indicate to lower levels that we are now in PLT mode */
1170 wl
->plt_mode
= plt_mode
;
1174 ret
= wl12xx_chip_wakeup(wl
, true);
1178 if (plt_mode
!= PLT_CHIP_AWAKE
) {
1179 ret
= wl
->ops
->plt_init(wl
);
1184 wl
->state
= WLCORE_STATE_ON
;
1185 wl1271_notice("firmware booted in PLT mode %s (%s)",
1187 wl
->chip
.fw_ver_str
);
1189 /* update hw/fw version info in wiphy struct */
1190 wiphy
->hw_version
= wl
->chip
.id
;
1191 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1192 sizeof(wiphy
->fw_version
));
1197 wl1271_power_off(wl
);
1201 wl
->plt_mode
= PLT_OFF
;
1203 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1204 WL1271_BOOT_RETRIES
);
1206 mutex_unlock(&wl
->mutex
);
1211 int wl1271_plt_stop(struct wl1271
*wl
)
1215 wl1271_notice("power down");
1218 * Interrupts must be disabled before setting the state to OFF.
1219 * Otherwise, the interrupt handler might be called and exit without
1220 * reading the interrupt status.
1222 wlcore_disable_interrupts(wl
);
1223 mutex_lock(&wl
->mutex
);
1225 mutex_unlock(&wl
->mutex
);
1228 * This will not necessarily enable interrupts as interrupts
1229 * may have been disabled when op_stop was called. It will,
1230 * however, balance the above call to disable_interrupts().
1232 wlcore_enable_interrupts(wl
);
1234 wl1271_error("cannot power down because not in PLT "
1235 "state: %d", wl
->state
);
1240 mutex_unlock(&wl
->mutex
);
1242 wl1271_flush_deferred_work(wl
);
1243 cancel_work_sync(&wl
->netstack_work
);
1244 cancel_work_sync(&wl
->recovery_work
);
1245 cancel_delayed_work_sync(&wl
->elp_work
);
1246 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1248 mutex_lock(&wl
->mutex
);
1249 wl1271_power_off(wl
);
1251 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1252 wl
->state
= WLCORE_STATE_OFF
;
1254 wl
->plt_mode
= PLT_OFF
;
1256 mutex_unlock(&wl
->mutex
);
1262 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1263 struct ieee80211_tx_control
*control
,
1264 struct sk_buff
*skb
)
1266 struct wl1271
*wl
= hw
->priv
;
1267 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1268 struct ieee80211_vif
*vif
= info
->control
.vif
;
1269 struct wl12xx_vif
*wlvif
= NULL
;
1270 unsigned long flags
;
1275 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1276 ieee80211_free_txskb(hw
, skb
);
1280 wlvif
= wl12xx_vif_to_data(vif
);
1281 mapping
= skb_get_queue_mapping(skb
);
1282 q
= wl1271_tx_get_queue(mapping
);
1284 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1286 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1289 * drop the packet if the link is invalid or the queue is stopped
1290 * for any reason but watermark. Watermark is a "soft"-stop so we
1291 * allow these packets through.
1293 if (hlid
== WL12XX_INVALID_LINK_ID
||
1294 (!test_bit(hlid
, wlvif
->links_map
)) ||
1295 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1296 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1297 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1298 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1299 ieee80211_free_txskb(hw
, skb
);
1303 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1305 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1307 wl
->tx_queue_count
[q
]++;
1308 wlvif
->tx_queue_count
[q
]++;
1311 * The workqueue is slow to process the tx_queue and we need stop
1312 * the queue here, otherwise the queue will get too long.
1314 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1315 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1316 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1317 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1318 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1319 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1323 * The chip specific setup must run before the first TX packet -
1324 * before that, the tx_work will not be initialized!
1327 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1328 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1329 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1332 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
/*
 * Queue the preallocated dummy packet (wl->dummy_packet) for transmission.
 *
 * The FW requests a dummy packet when it runs low on RX memory blocks.
 * Returns 0 if nothing needed to be done (or the packet was queued for the
 * TX work), or the return value of wlcore_tx_work_locked() when the packet
 * is sent immediately.
 *
 * NOTE(review): reconstructed from garbled extraction; the early/final
 * "return 0" paths are inferred from the surrounding control flow.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1363 * The size of the dummy packet should be at least 1400 bytes. However, in
1364 * order to minimize the number of bus transactions, aligning it to 512 bytes
1365 * boundaries could be beneficial, performance wise
1367 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Allocate and initialize the dummy (NULL-function data) packet that is
 * sent to the FW when it requests one. The skb reserves headroom for the
 * HW TX descriptor and carries a zeroed 802.11 3-address header plus
 * zero padding up to TOTAL_TX_DUMMY_PACKET_SIZE.
 *
 * Returns the new skb, or NULL on allocation failure.
 */
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
{
	struct sk_buff *skb;
	struct ieee80211_hdr_3addr *hdr;
	unsigned int dummy_packet_size;

	/* payload size = total size minus HW descriptor and 802.11 header */
	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);

	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
	if (!skb) {
		wl1271_warning("Failed to allocate a dummy packet skb");
		return NULL;
	}

	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));

	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_STYPE_NULLFUNC |
					 IEEE80211_FCTL_TODS);

	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);

	/* Dummy packets require the TID to be management */
	skb->priority = WL1271_TID_MGMT;

	/* Initialize all fields that might be used */
	skb_set_queue_mapping(skb, 0);
	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));

	return skb;
}
1407 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern
*p
)
1409 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1410 int i
, pattern_len
= 0;
1413 wl1271_warning("No mask in WoWLAN pattern");
1418 * The pattern is broken up into segments of bytes at different offsets
1419 * that need to be checked by the FW filter. Each segment is called
1420 * a field in the FW API. We verify that the total number of fields
1421 * required for this pattern won't exceed FW limits (8)
1422 * as well as the total fields buffer won't exceed the FW limit.
1423 * Note that if there's a pattern which crosses Ethernet/IP header
1424 * boundary a new field is required.
1426 for (i
= 0; i
< p
->pattern_len
; i
++) {
1427 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1432 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1434 fields_size
+= pattern_len
+
1435 RX_FILTER_FIELD_OVERHEAD
;
1443 fields_size
+= pattern_len
+
1444 RX_FILTER_FIELD_OVERHEAD
;
1451 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1455 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1456 wl1271_warning("RX Filter too complex. Too many segments");
1460 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1461 wl1271_warning("RX filter pattern is too big");
1468 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1470 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
/*
 * Free an RX filter and every per-field pattern buffer it owns.
 * Accepts NULL (no-op), matching wl1271_rx_filter_alloc() failure paths.
 */
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
{
	int i;

	if (filter == NULL)
		return;

	/* each field's pattern was kzalloc'ed in wl1271_rx_filter_alloc_field */
	for (i = 0; i < filter->num_fields; i++)
		kfree(filter->fields[i].pattern);

	kfree(filter);
}
/*
 * Append one pattern-match field to an RX filter.
 *
 * @filter:  filter being built (num_fields is advanced on success)
 * @offset:  byte offset the FW should match at (stored little-endian)
 * @flags:   WL1271_RX_FILTER_FLAG_* qualifying the offset's base
 * @pattern: bytes to match; copied into a freshly allocated buffer
 * @len:     pattern length in bytes
 *
 * Returns 0 on success, -EINVAL when the filter already holds
 * WL1271_RX_FILTER_MAX_FIELDS fields, -ENOMEM on allocation failure.
 * NOTE(review): error codes reconstructed from context — confirm.
 */
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
				 u16 offset, u8 flags,
				 const u8 *pattern, u8 len)
{
	struct wl12xx_rx_filter_field *field;

	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("Max fields per RX filter. can't alloc another");
		return -EINVAL;
	}

	field = &filter->fields[filter->num_fields];

	field->pattern = kzalloc(len, GFP_KERNEL);
	if (!field->pattern) {
		wl1271_warning("Failed to allocate RX filter pattern");
		return -ENOMEM;
	}

	/* only count the field once its pattern buffer exists */
	filter->num_fields++;

	field->offset = cpu_to_le16(offset);
	field->flags = flags;
	field->len = len;
	memcpy(field->pattern, pattern, len);

	return 0;
}
/*
 * Compute the flattened (wire-format) size of all fields in a filter:
 * per field, the fixed struct minus the host pattern pointer, plus the
 * pattern bytes themselves. Mirrors wl1271_rx_filter_flatten_fields().
 */
int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
{
	int i, fields_size = 0;

	for (i = 0; i < filter->num_fields; i++)
		fields_size += filter->fields[i].len +
			sizeof(struct wl12xx_rx_filter_field) -
			sizeof(u8 *);

	return fields_size;
}
/*
 * Serialize a filter's fields into @buf in the FW wire format: each
 * field header is copied with the pattern bytes placed inline where the
 * host-side pattern pointer would be. @buf must be at least
 * wl1271_rx_filter_get_fields_size(filter) bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
				     u8 *buf)
{
	int i;
	struct wl12xx_rx_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* pattern bytes overwrite the pointer slot and beyond */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		buf += sizeof(struct wl12xx_rx_filter_field) -
			sizeof(u8 *) + field->len;
	}
}
1547 * Allocates an RX filter returned through f
1548 * which needs to be freed using rx_filter_free()
1551 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern
*p
,
1552 struct wl12xx_rx_filter
**f
)
1555 struct wl12xx_rx_filter
*filter
;
1559 filter
= wl1271_rx_filter_alloc();
1561 wl1271_warning("Failed to alloc rx filter");
1567 while (i
< p
->pattern_len
) {
1568 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1573 for (j
= i
; j
< p
->pattern_len
; j
++) {
1574 if (!test_bit(j
, (unsigned long *)p
->mask
))
1577 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1578 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1582 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1584 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1586 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1587 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1592 ret
= wl1271_rx_filter_alloc_field(filter
,
1595 &p
->pattern
[i
], len
);
1602 filter
->action
= FILTER_SIGNAL
;
1608 wl1271_rx_filter_free(filter
);
1614 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1615 struct cfg80211_wowlan
*wow
)
1619 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1620 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1625 ret
= wl1271_rx_filter_clear_all(wl
);
1632 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1635 /* Validate all incoming patterns before clearing current FW state */
1636 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1637 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1639 wl1271_warning("Bad wowlan pattern %d", i
);
1644 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1648 ret
= wl1271_rx_filter_clear_all(wl
);
1652 /* Translate WoWLAN patterns into filters */
1653 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1654 struct cfg80211_pkt_pattern
*p
;
1655 struct wl12xx_rx_filter
*filter
= NULL
;
1657 p
= &wow
->patterns
[i
];
1659 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1661 wl1271_warning("Failed to create an RX filter from "
1662 "wowlan pattern %d", i
);
1666 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1668 wl1271_rx_filter_free(filter
);
1673 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
/*
 * Prepare a STA vif for suspend: program the WoWLAN RX filters and,
 * when the suspend-specific wake-up settings differ from the normal
 * ones, switch the FW to the suspend wake-up conditions.
 *
 * Returns 0 when not associated or nothing to change, negative error
 * otherwise. NOTE(review): goto/out structure reconstructed.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out;

	/* nothing to do if suspend settings equal the normal ones */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);
out:
	return ret;
}
/*
 * Prepare an AP vif for suspend by enabling beacon filtering in the FW.
 * No-op (returns 0) when the AP is not started.
 */
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
		goto out;

	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);

out:
	return ret;
}
/*
 * Dispatch suspend preparation by vif role: STA vifs get WoWLAN/wake-up
 * configuration, AP vifs get beacon filtering; other roles need nothing.
 */
static int wl1271_configure_suspend(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    struct cfg80211_wowlan *wow)
{
	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
		return wl1271_configure_suspend_sta(wl, wlvif, wow);
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl1271_configure_suspend_ap(wl, wlvif);
	return 0;
}
/*
 * Undo the suspend-time configuration on resume: for STA vifs, clear the
 * WoWLAN filters and restore the normal wake-up conditions; for AP vifs,
 * disable beacon filtering again. Errors are logged, not propagated.
 *
 * NOTE(review): the is_sta/is_ap branch structure is reconstructed from
 * garbled extraction — confirm against the original file.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	if ((!is_ap) && (!is_sta))
		return;

	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	if (is_sta) {
		/* NULL wow tears down the WoWLAN RX filters */
		wl1271_configure_wowlan(wl, NULL);

		/* nothing to restore if suspend used the normal settings */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			return;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
					    wl->conf.conn.wake_up_event,
					    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);
	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}
}
1769 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1770 struct cfg80211_wowlan
*wow
)
1772 struct wl1271
*wl
= hw
->priv
;
1773 struct wl12xx_vif
*wlvif
;
1776 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1779 /* we want to perform the recovery before suspending */
1780 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1781 wl1271_warning("postponing suspend to perform recovery");
1785 wl1271_tx_flush(wl
);
1787 mutex_lock(&wl
->mutex
);
1789 ret
= wl1271_ps_elp_wakeup(wl
);
1793 wl
->wow_enabled
= true;
1794 wl12xx_for_each_wlvif(wl
, wlvif
) {
1795 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1797 mutex_unlock(&wl
->mutex
);
1798 wl1271_warning("couldn't prepare device to suspend");
1803 /* disable fast link flow control notifications from FW */
1804 ret
= wlcore_hw_interrupt_notify(wl
, false);
1808 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 ret
= wlcore_hw_rx_ba_filter(wl
,
1810 !!wl
->conf
.conn
.suspend_rx_ba_activity
);
1815 wl1271_ps_elp_sleep(wl
);
1816 mutex_unlock(&wl
->mutex
);
1819 wl1271_warning("couldn't prepare device to suspend");
1823 /* flush any remaining work */
1824 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1827 * disable and re-enable interrupts in order to flush
1830 wlcore_disable_interrupts(wl
);
1833 * set suspended flag to avoid triggering a new threaded_irq
1834 * work. no need for spinlock as interrupts are disabled.
1836 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1838 wlcore_enable_interrupts(wl
);
1839 flush_work(&wl
->tx_work
);
1840 flush_delayed_work(&wl
->elp_work
);
1843 * Cancel the watchdog even if above tx_flush failed. We will detect
1844 * it on resume anyway.
1846 cancel_delayed_work(&wl
->tx_watchdog_work
);
1851 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1853 struct wl1271
*wl
= hw
->priv
;
1854 struct wl12xx_vif
*wlvif
;
1855 unsigned long flags
;
1856 bool run_irq_work
= false, pending_recovery
;
1859 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1861 WARN_ON(!wl
->wow_enabled
);
1864 * re-enable irq_work enqueuing, and call irq_work directly if
1865 * there is a pending work.
1867 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1868 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1869 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1870 run_irq_work
= true;
1871 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1873 mutex_lock(&wl
->mutex
);
1875 /* test the recovery flag before calling any SDIO functions */
1876 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1880 wl1271_debug(DEBUG_MAC80211
,
1881 "run postponed irq_work directly");
1883 /* don't talk to the HW if recovery is pending */
1884 if (!pending_recovery
) {
1885 ret
= wlcore_irq_locked(wl
);
1887 wl12xx_queue_recovery_work(wl
);
1890 wlcore_enable_interrupts(wl
);
1893 if (pending_recovery
) {
1894 wl1271_warning("queuing forgotten recovery on resume");
1895 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1899 ret
= wl1271_ps_elp_wakeup(wl
);
1903 wl12xx_for_each_wlvif(wl
, wlvif
) {
1904 wl1271_configure_resume(wl
, wlvif
);
1907 ret
= wlcore_hw_interrupt_notify(wl
, true);
1911 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1912 ret
= wlcore_hw_rx_ba_filter(wl
, false);
1917 wl1271_ps_elp_sleep(wl
);
1920 wl
->wow_enabled
= false;
1923 * Set a flag to re-init the watchdog on the first Tx after resume.
1924 * That way we avoid possible conditions where Tx-complete interrupts
1925 * fail to arrive and we perform a spurious recovery.
1927 set_bit(WL1271_FLAG_REINIT_TX_WDOG
, &wl
->flags
);
1928 mutex_unlock(&wl
->mutex
);
/*
 * mac80211 start callback. Intentionally does no hardware work — boot is
 * deferred until add_interface, as explained below. Always returns 0.
 */
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */
	return 0;
}
/*
 * Stop the device with wl->mutex held: mark the state OFF, quiesce
 * interrupts and all works (temporarily dropping the mutex to avoid
 * deadlocks with work functions), reset TX state, power off, and reset
 * all per-device bookkeeping so a later add_interface starts clean.
 *
 * NOTE(review): reconstructed from garbled extraction; a few member
 * resets in the middle (e.g. rx_counter/ap_ps_map) are inferred from the
 * gaps in the embedded line numbering — confirm against the original.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* balance a disable done by a pending recovery, then bail */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
/*
 * mac80211 stop callback: take wl->mutex and delegate to
 * wlcore_op_stop_locked(), which does the actual teardown.
 */
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");

	mutex_lock(&wl->mutex);

	wlcore_op_stop_locked(wl);

	mutex_unlock(&wl->mutex);
}
/*
 * Delayed work that fires when a channel switch did not complete in time:
 * report the failure to mac80211 and tell the FW to abort the switch.
 *
 * NOTE(review): the wl back-pointer assignment and error-path gotos are
 * reconstructed from garbled extraction.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	/* false = the switch did NOT succeed */
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * Delayed work that declares the connection lost after repeated missed
 * beacons: forwards the loss to mac80211 if the STA is still associated.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * Delayed work for an AP role: once enough time has passed since the last
 * auth reply without the peer completing association, cancel the pending
 * ROC by clearing the in-connection station entry.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * deviations.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
/*
 * Reserve a free rate-policy slot in wl->rate_policies_map and return its
 * index through @idx. Returns 0 on success, -EBUSY when all
 * WL12XX_MAX_RATE_POLICIES slots are taken.
 * NOTE(review): return values reconstructed — confirm.
 */
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
	u8 policy = find_first_zero_bit(wl->rate_policies_map,
					WL12XX_MAX_RATE_POLICIES);
	if (policy >= WL12XX_MAX_RATE_POLICIES)
		return -EBUSY;

	__set_bit(policy, wl->rate_policies_map);
	*idx = policy;
	return 0;
}
/*
 * Release a rate-policy slot previously obtained from
 * wl12xx_allocate_rate_policy() and poison *idx with the out-of-range
 * sentinel so double-frees are caught by the WARN_ON.
 */
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
		return;

	__clear_bit(*idx, wl->rate_policies_map);
	*idx = WL12XX_MAX_RATE_POLICIES;
}
/*
 * Reserve a free keep-alive (KLV) template slot in wl->klv_templates_map
 * and return its index through @idx. Returns 0 on success, -EBUSY when
 * all WLCORE_MAX_KLV_TEMPLATES slots are taken.
 * NOTE(review): return values reconstructed — confirm.
 */
static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
{
	u8 policy = find_first_zero_bit(wl->klv_templates_map,
					WLCORE_MAX_KLV_TEMPLATES);
	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
		return -EBUSY;

	__set_bit(policy, wl->klv_templates_map);
	*idx = policy;
	return 0;
}
/*
 * Release a KLV template slot previously obtained from
 * wlcore_allocate_klv_template() and poison *idx with the out-of-range
 * sentinel so double-frees are caught by the WARN_ON.
 */
static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
{
	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
		return;

	__clear_bit(*idx, wl->klv_templates_map);
	*idx = WLCORE_MAX_KLV_TEMPLATES;
}
/*
 * Map a vif's BSS type to the FW role type used when enabling the role.
 * Returns WL12XX_INVALID_ROLE_TYPE (and logs an error) for an unknown
 * bss_type.
 *
 * NOTE(review): the p2p-flag checks inside the AP/STA cases are
 * reconstructed from garbled extraction — confirm.
 */
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	switch (wlvif->bss_type) {
	case BSS_TYPE_AP_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_GO;
		else
			return WL1271_ROLE_AP;

	case BSS_TYPE_STA_BSS:
		if (wlvif->p2p)
			return WL1271_ROLE_P2P_CL;
		else
			return WL1271_ROLE_STA;

	case BSS_TYPE_IBSS:
		return WL1271_ROLE_IBSS;

	default:
		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
	}
	return WL12XX_INVALID_ROLE_TYPE;
}
/*
 * Initialize the driver-private per-vif state (wl12xx_vif) for a newly
 * added interface: derive the BSS type from the mac80211 interface type,
 * allocate rate policies and the keep-alive template, copy the global
 * (per-device) settings into the vif, and set up all per-vif works and
 * timers. Returns 0 on success.
 *
 * NOTE(review): switch-case breaks, p2p flag assignments and the default
 * return value are reconstructed from garbled extraction — confirm.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2341 static int wl12xx_init_fw(struct wl1271
*wl
)
2343 int retries
= WL1271_BOOT_RETRIES
;
2344 bool booted
= false;
2345 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2350 ret
= wl12xx_chip_wakeup(wl
, false);
2354 ret
= wl
->ops
->boot(wl
);
2358 ret
= wl1271_hw_init(wl
);
2366 mutex_unlock(&wl
->mutex
);
2367 /* Unlocking the mutex in the middle of handling is
2368 inherently unsafe. In this case we deem it safe to do,
2369 because we need to let any possibly pending IRQ out of
2370 the system (and while we are WLCORE_STATE_OFF the IRQ
2371 work function will not do anything.) Also, any other
2372 possible concurrent operations will fail due to the
2373 current state, hence the wl1271 struct should be safe. */
2374 wlcore_disable_interrupts(wl
);
2375 wl1271_flush_deferred_work(wl
);
2376 cancel_work_sync(&wl
->netstack_work
);
2377 mutex_lock(&wl
->mutex
);
2379 wl1271_power_off(wl
);
2383 wl1271_error("firmware boot failed despite %d retries",
2384 WL1271_BOOT_RETRIES
);
2388 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2390 /* update hw/fw version info in wiphy struct */
2391 wiphy
->hw_version
= wl
->chip
.id
;
2392 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2393 sizeof(wiphy
->fw_version
));
2396 * Now we know if 11a is supported (info from the NVS), so disable
2397 * 11a channels if not supported
2399 if (!wl
->enable_11a
)
2400 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2402 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2403 wl
->enable_11a
? "" : "not ");
2405 wl
->state
= WLCORE_STATE_ON
;
2410 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2412 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
/*
 * Check whether a fw switch (i.e. moving from one loaded
 * fw to another) is needed. This function is also responsible
 * for updating wl->last_vif_count, so it must be called before
 * loading a non-plt fw (so the correct fw (single-role/multi-role)
 * will be used).
 *
 * Returns true when the currently loaded FW (single- vs multi-role)
 * does not match the (projected) number of vifs.
 * NOTE(review): return-value lines reconstructed from garbled extraction.
 */
static bool wl12xx_need_fw_change(struct wl1271 *wl,
				  struct vif_counter_data vif_counter_data,
				  bool add)
{
	enum wl12xx_fw_type current_fw = wl->fw_type;
	u8 vif_count = vif_counter_data.counter;

	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
		return false;

	/* increase the vif count if this is a new vif */
	if (add && !vif_counter_data.cur_vif_running)
		vif_count++;

	wl->last_vif_count = vif_count;

	/* no need for fw change if the device is OFF */
	if (wl->state == WLCORE_STATE_OFF)
		return false;

	/* no need for fw change if a single fw is used */
	if (!wl->mr_fw_name)
		return false;

	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
		return true;
	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
		return true;

	return false;
}
/*
 * Enter "forced psm". Make sure the sta is in psm against the ap,
 * to make the fw switch a bit more disconnection-persistent.
 */
static void wl12xx_force_active_psm(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	/* applies to every STA vif on the device */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
	}
}
/*
 * State carried through ieee80211_iterate_active_interfaces_atomic()
 * while searching for a free hw-queue base (see wlcore_hw_queue_iter).
 */
struct wlcore_hw_queue_iter_data {
	/* bitmap of hw-queue bases taken by active interfaces */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
/*
 * Per-interface iterator callback: mark the hw-queue base used by @vif
 * in the shared bitmap, except that the vif currently being added is
 * flagged via cur_running instead of claiming a slot.
 */
static void wlcore_hw_queue_iter(void *data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct wlcore_hw_queue_iter_data *iter_data = data;

	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
		return;

	if (iter_data->cur_running || vif == iter_data->vif) {
		iter_data->cur_running = true;
		return;
	}

	/* each interface owns NUM_TX_QUEUES consecutive hw queues */
	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
}
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to @wlvif. Reuses
 * the pre-allocated base when the vif is already running in mac80211
 * (resume/recovery), otherwise claims the first free base. AP vifs
 * additionally get a CAB (content-after-beacon) queue from the reserved
 * tail range.
 *
 * Returns 0 on success, -EBUSY when no base is free.
 * NOTE(review): local declarations and return lines reconstructed.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2542 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2543 struct ieee80211_vif
*vif
)
2545 struct wl1271
*wl
= hw
->priv
;
2546 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2547 struct vif_counter_data vif_count
;
2552 wl1271_error("Adding Interface not allowed while in PLT mode");
2556 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2557 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2559 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2560 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2562 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2564 mutex_lock(&wl
->mutex
);
2565 ret
= wl1271_ps_elp_wakeup(wl
);
2570 * in some very corner case HW recovery scenarios its possible to
2571 * get here before __wl1271_op_remove_interface is complete, so
2572 * opt out if that is the case.
2574 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2575 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2581 ret
= wl12xx_init_vif_data(wl
, vif
);
2586 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2587 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2592 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2596 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2597 wl12xx_force_active_psm(wl
);
2598 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2599 mutex_unlock(&wl
->mutex
);
2600 wl1271_recovery_work(&wl
->recovery_work
);
2605 * TODO: after the nvs issue will be solved, move this block
2606 * to start(), and make sure here the driver is ON.
2608 if (wl
->state
== WLCORE_STATE_OFF
) {
2610 * we still need this in order to configure the fw
2611 * while uploading the nvs
2613 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2615 ret
= wl12xx_init_fw(wl
);
2620 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2621 role_type
, &wlvif
->role_id
);
2625 ret
= wl1271_init_vif_specific(wl
, vif
);
2629 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2630 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2632 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2637 wl1271_ps_elp_sleep(wl
);
2639 mutex_unlock(&wl
->mutex
);
/*
 * Tear down a single virtual interface under wl->mutex: idle any scan it
 * owns, disable its firmware roles, free its rate policies/keys, unlink it
 * and cancel all its pending work items.
 * NOTE(review): this excerpt is missing original source lines (braces,
 * `ret`/`i` declarations, error paths, early returns) — only the visible
 * statements are preserved below; control flow shown here is incomplete.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	/* vif was never fully brought up — nothing to tear down */
	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)

	wl1271_info("down");

	/* if this vif owns an in-flight scan, idle it and tell mac80211 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	/* this vif held the remain-on-channel — report it expired */
	if (wl->roc_vif == vif) {
		ieee80211_remain_on_channel_expired(wl->hw);

	/* skip FW commands during recovery — the FW is going away anyway */
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);

		wl1271_ps_elp_sleep(wl);

	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* STA/IBSS: release station-side rate policies and KLV */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
		/* AP side: release AP links, rate policies and recorded keys
		 * (presumably the else-branch of the STA/IBSS check above —
		 * the branching lines are missing from this excerpt) */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do thin on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);

	/* drop the mutex while cancelling work items that may take it */
	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_work_sync(&wlvif->rc_update_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface op: locate the vif, run the locked teardown,
 * and trigger an intended FW recovery if the vif count change requires a
 * different firmware.
 * NOTE(review): excerpt is missing original lines (braces, `goto out`,
 * loop-break logic) — only visible statements are preserved.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		__wl1271_op_remove_interface(wl, vif, true);

	/* iter should have stopped on the vif we just removed */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface op: implemented as remove + re-add with the
 * new interface type, guarded by the VIF_CHANGE_IN_PROGRESS flag.
 * NOTE(review): excerpt is missing original lines (ret declaration, p2p
 * handling, return) — only visible statements are preserved.
 */
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
	struct wl1271 *wl = hw->priv;

	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
	wl1271_op_remove_interface(hw, vif);

	vif->type = new_type;
	ret = wl1271_op_add_interface(hw, vif);

	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware role-start ("join") for a STA or IBSS vif, clearing
 * the recorded encryption type first (the JOIN wipes keys in the chipset).
 * NOTE(review): excerpt is missing original lines (branch on is_ibss,
 * ret handling, return) — only visible statements are preserved.
 */
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		wl1271_info("JOIN while associated.");

	/* clear encryption type */
	wlvif->encryption_type = KEY_NONE;

	ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
	if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
		/*
		 * TODO: this is an ugly workaround for wl12xx fw
		 * bug - we are not able to tx/rx after the first
		 * start_sta, so make dummy start+stop calls,
		 * and then call start_sta again.
		 * this should be fixed in the fw.
		 */
		wl12xx_cmd_role_start_sta(wl, wlvif);
		wl12xx_cmd_role_stop_sta(wl, wlvif);

	ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame in @skb (starting at
 * @offset) and copy it into wlvif->ssid / wlvif->ssid_len.
 * NOTE(review): excerpt is missing original lines (length computation,
 * NULL check, returns) — only visible statements are preserved.
 */
static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
		wl1271_error("No SSID in IEs!");

	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
		wl1271_error("SSID is too long!");

	wlvif->ssid_len = ssid_len;
	/* ptr+2 skips the IE header (EID byte + length byte) */
	memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Recover the SSID for a STA vif from the AP probe request mac80211 built
 * for us, via wl1271_ssid_set().
 * NOTE(review): excerpt is missing original lines (skb NULL check,
 * kfree_skb, return) — only visible statements are preserved.
 */
static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;

	/* we currently only support setting the ssid from the ap probe req */
	if (wlvif->bss_type != BSS_TYPE_STA_BSS)

	skb = ieee80211_ap_probereq_get(wl->hw, vif);

	/* IEs start after the fixed probe-request header */
	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply association state to the firmware: record AID/beacon parameters,
 * build PS-poll and probe-request templates, enable connection monitoring
 * and keep-alive, and sync PS mode and rate policies.
 * NOTE(review): excerpt is missing original lines (ret checks/gotos,
 * some call arguments) — only visible statements are preserved.
 */
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
	wlvif->aid = bss_conf->aid;
	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
	wlvif->beacon_int = bss_conf->beacon_int;
	wlvif->wmm_enabled = bss_conf->qos;

	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);

	/*
	 * with wl1271, we don't need to update the
	 * beacon_int and dtim_period, because the firmware
	 * updates it by itself when the first beacon is
	 * received after a join.
	 */
	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);

	/*
	 * Get a template for hardware connection maintenance
	 */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
	ieoffset = offsetof(struct ieee80211_mgmt,
			    u.probe_req.variable);
	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);

	/* enable the connection monitoring feature */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);

	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);

	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);

	ret = wl1271_acx_keep_alive_config(wl, wlvif,
					   wlvif->sta.klv_template_id,
					   ACX_KEEP_ALIVE_TPL_VALID);

	/*
	 * The default fw psm configuration is AUTO, while mac80211 default
	 * setting is off (ACTIVE), so sync the fw with the correct value.
	 */
	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);

	wl1271_tx_enabled_rates_get(wl,
	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo association state: free the probe-request template, disable
 * connection monitoring / keep-alive / beacon filtering, abort any pending
 * channel switch and invalidate the keep-alive template.
 * NOTE(review): excerpt is missing original lines (condition heads for the
 * sta/ibss checks, ret checks, return) — only visible statements remain.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))

	/* make sure we are joined (ibss) */
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))

	/* use defaults when not associated */

	/* free probe-request template */
	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;

	/* disable connection monitor features */
	ret = wl1271_acx_conn_monit_params(wl, wlvif, false);

	/* Disable the keep-alive feature */
	ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);

	/* disable beacon filtering */
	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);

	/* a channel switch was in progress — abort it and tell mac80211 */
	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);
/*
 * Reset the vif's basic and full rate sets to the configured bitrate mask
 * for its current band.
 * NOTE(review): excerpt is missing the original braces of this function.
 */
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
	wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track a STA vif's idle transitions by toggling WLVIF_FLAG_ACTIVE,
 * stopping its scheduled scan when it goes idle.
 * NOTE(review): excerpt is missing original lines (the idle/active branch
 * structure, ROC handling if any) — only visible statements remain.
 */
static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);

	/* no state change — nothing to do */
	if (idle == cur_idle)

		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);

		/* The current firmware only supports sched_scan in idle */
		if (wl->sched_vif == wlvif)
			wl->ops->sched_scan_stop(wl, wlvif);

		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif half of .config: push a changed TX power level to the firmware
 * and cache it in the vif.
 * NOTE(review): excerpt is missing original lines (ret check, return) —
 * only visible statements remain.
 */
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			     struct ieee80211_conf *conf, u32 changed)
	if (conf->power_level != wlvif->power_level) {
		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);

		/* cache only after the firmware accepted the new level */
		wlvif->power_level = conf->power_level;
/*
 * mac80211 .config op: record the global power level and apply the changed
 * configuration to every registered vif, bracketed by ELP wakeup/sleep.
 * NOTE(review): excerpt is missing original lines (goto out paths, ret
 * checks, return) — only visible statements remain.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",

	mutex_lock(&wl->mutex);

	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))

	ret = wl1271_ps_elp_wakeup(wl);

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast() and consumed in
 * configure_filter(); mc_list holds up to ACX_MC_ADDRESS_GROUP_MAX MACs.
 * NOTE(review): excerpt is missing the other members of this struct
 * (an enabled flag and mc_list_length are used by the code below).
 */
struct wl1271_filter_params {
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast op: snapshot the multicast address list into
 * a heap-allocated wl1271_filter_params, returned to mac80211 as a cookie
 * (cast to u64) for the subsequent configure_filter call.
 * NOTE(review): excerpt is missing original lines (NULL-alloc return,
 * else branch, braces) — only visible statements remain.
 */
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
				       struct netdev_hw_addr_list *mc_list)
	struct wl1271_filter_params *fp;
	struct netdev_hw_addr *ha;

	/* GFP_ATOMIC: this op may be called in atomic context */
	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
		wl1271_error("Out of memory setting filters.");

	/* update multicast filtering parameters */
	fp->mc_list_length = 0;
	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
		/* too many addresses — fall back to disabled filtering */
		fp->enabled = false;
		netdev_hw_addr_list_for_each(ha, mc_list) {
			memcpy(fp->mc_list[fp->mc_list_length],
			       ha->addr, ETH_ALEN);
			fp->mc_list_length++;

	/* cookie round-trips through a u64 back into configure_filter */
	return (u64)(unsigned long)fp;
/* RX filter flags this driver supports; the rest are masked off in
 * configure_filter(). NOTE(review): the middle continuation lines of this
 * macro are missing from this excerpt. */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter op: clamp the requested filters to what the
 * hardware supports and program the multicast group-address table for each
 * non-AP vif from the prepare_multicast() cookie.
 * NOTE(review): excerpt is missing original lines (goto out paths, call
 * arguments, kfree(fp)) — only visible statements remain.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
	/* cookie allocated by wl1271_op_prepare_multicast() */
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	/* report back to mac80211 only the filters we actually honour */
	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))

	ret = wl1271_ps_elp_wakeup(wl);

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   fp->mc_list_length);

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 */

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Record an AP key in wlvif->ap.recorded_keys so it can be programmed into
 * the firmware later (by wl1271_ap_init_hwenc, once the AP role starts).
 * Returns an error for oversized keys, duplicate ids, a full table or a
 * failed allocation.
 * NOTE(review): excerpt is missing original lines (returns, braces,
 * alloc NULL check) — only visible statements remain.
 */
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 id, u8 key_type, u8 key_size,
				const u8 *key, u8 hlid, u32 tx_seq_32,
	struct wl1271_ap_key *ap_key;

	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);

	if (key_size > MAX_KEY_SIZE)

	/*
	 * Find next free entry in ap_keys. Also check we are not replacing
	 * an existing key.
	 */
	for (i = 0; i < MAX_NUM_KEYS; i++) {
		if (wlvif->ap.recorded_keys[i] == NULL)

		if (wlvif->ap.recorded_keys[i]->id == id) {
			wl1271_warning("trying to record key replacement");

	/* table is full */
	if (i == MAX_NUM_KEYS)

	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);

	ap_key->key_type = key_type;
	ap_key->key_size = key_size;
	memcpy(ap_key->key, key, key_size);
	ap_key->hlid = hlid;
	ap_key->tx_seq_32 = tx_seq_32;
	ap_key->tx_seq_16 = tx_seq_16;

	wlvif->ap.recorded_keys[i] = ap_key;
/*
 * Free every recorded AP key and NULL its slot so the table can be reused
 * (and a later double-free is impossible).
 * NOTE(review): excerpt is missing the original braces / `int i;`.
 */
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	for (i = 0; i < MAX_NUM_KEYS; i++) {
		kfree(wlvif->ap.recorded_keys[i]);
		wlvif->ap.recorded_keys[i] = NULL;
/*
 * Program all keys recorded before AP start into the firmware; if any WEP
 * key was added, also set the default WEP key on the broadcast link.
 * Recorded keys are freed afterwards.
 * NOTE(review): excerpt is missing original lines (hlid init, ret checks,
 * continue/return) — only visible statements remain.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		/* empty slot — skip */
		if (wlvif->ap.recorded_keys[i] == NULL)

		key = wlvif->ap.recorded_keys[i];
		/* keys recorded without a link default to the bcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,

		if (key->key_type == KEY_WEP)
			wep_key_added = true;

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);

	wl1271_free_ap_keys(wl, wlvif);
/*
 * Common key add/remove path for both AP and STA vifs. AP keys set before
 * the AP role starts are recorded for deferred programming; STA-side key
 * removals are silently ignored where the hardware cannot honour them.
 * NOTE(review): excerpt is missing original lines (is_ap branch heads,
 * sta NULL checks, returns) — only visible statements remain.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
		struct wl1271_station *wl_sta;

			/* per-station key: target that station's link */
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
			hlid = wlvif->ap.bcast_hlid;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)

			/* AP not started yet — record for later programming */
			ret = wl1271_record_ap_key(wl, wlvif, id,
						   key, hlid, tx_seq_32,
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
						    id, key_type, key_size,
						    key, hlid, tx_seq_32,

		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
/*
 * mac80211 .set_key op wrapper: for ciphers whose spare-block accounting
 * differs (GEM/TKIP), stop and flush TX queues around the key change so FW
 * accounting stays consistent, then dispatch to the HW-specific set_key.
 * NOTE(review): excerpt is missing original lines (ret checks, labels,
 * return) — only visible statements remain.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
	struct wl1271 *wl = hw->priv;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/* still need to re-wake the queues we stopped above */
		goto out_wake_queues;

	ret = wl1271_ps_elp_wakeup(wl);
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);
/*
 * Exported default set_key implementation: resolve the link (hlid) for the
 * key, derive the TX sequence counters from freed-packet accounting, map
 * the cipher suite to a firmware key type and dispatch add/remove via
 * wl1271_set_key(). Rebuilds the ARP response template when the unicast
 * encryption type of a STA vif changes.
 * NOTE(review): excerpt is missing original lines (switch breaks, case
 * SET_KEY/DISABLE_KEY labels, ret checks, returns) — only visible
 * statements remain.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			/* pairwise key for a known station */
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
			hlid = wlvif->ap.bcast_hlid;
		hlid = wlvif->sta.hlid;

	if (hlid != WL12XX_INVALID_LINK_ID) {
		/* seed PN/IV counters from the link's freed-packet count */
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_conf->hw_key_idx = key_conf->keyidx;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
	case WLAN_CIPHER_SUITE_CCMP:
		/* firmware generates the IV; mac80211 only reserves space */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
	case WL1271_CIPHER_SUITE_GEM:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     tx_seq_32, tx_seq_16, sta);
			wl1271_error("Could not add or replace key");

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
				wl1271_warning("build arp rsp failed: %d", ret);

		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
			wl1271_error("Could not remove key");

		wl1271_error("Unsupported key cmd 0x%x", cmd);

EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key op: record the default key index and,
 * for WEP, push the default key to the firmware.
 * NOTE(review): excerpt is missing original lines (goto out paths, ret
 * checks, call arguments) — only visible statements remain.
 */
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",

	/* we don't handle unsetting of default key */

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {

	ret = wl1271_ps_elp_wakeup(wl);

	wlvif->default_key = key_idx;

	/* the default WEP key needs to be configured at least once */
	if (wlvif->encryption_type == KEY_WEP) {
		ret = wl12xx_cmd_set_default_wep_key(wl,

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to firmware (only on chips with the
 * REGDOMAIN_CONF quirk); a failed configuration triggers recovery.
 * NOTE(review): excerpt is missing original lines (returns, ret checks) —
 * only visible statements remain.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	ret = wl1271_ps_elp_wakeup(wl);

	ret = wlcore_cmd_regdomain_config_locked(wl);
		/* FW rejected the regdomain update — recover */
		wl12xx_queue_recovery_work(wl);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan op: start a hardware scan for the first requested
 * SSID, refusing while any role is in remain-on-channel.
 * NOTE(review): excerpt is missing original lines (ssid/len declarations
 * and the n_ssids check, goto out paths, returns) — only visible
 * statements remain.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_scan_request *hw_req)
	struct cfg80211_scan_request *req = &hw_req->req;
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

		/* presumably guarded by req->n_ssids — TODO confirm */
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */

	ret = wl1271_ps_elp_wakeup(wl);

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan op: stop an in-flight scan in firmware, reset
 * local scan state, complete the scan toward mac80211 (aborted=true) and
 * synchronously cancel the deferred completion work.
 * NOTE(review): excerpt is missing original lines (goto out paths, ret
 * checks) — only visible statements remain.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	/* no scan in flight — nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)

	ret = wl1271_ps_elp_wakeup(wl);

	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	/* aborted=true: tell mac80211 the scan did not complete normally */
	ieee80211_scan_completed(wl->hw, true);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);

	/* must be outside the mutex — the work itself takes it */
	cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start op: hand the scheduled-scan request to the
 * chip-specific implementation and record the owning vif in wl->sched_vif.
 * NOTE(review): excerpt is missing original lines (goto out paths, ret
 * checks, return) — only visible statements remain.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_scan_ies *ies)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {

	ret = wl1271_ps_elp_wakeup(wl);

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);

	/* remember which vif owns the scheduled scan */
	wl->sched_vif = wlvif;

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop op: stop the scheduled scan via the
 * chip-specific hook, bracketed by ELP wakeup/sleep under wl->mutex.
 * NOTE(review): excerpt is missing original lines (goto out paths, ret
 * checks, return) — only visible statements remain.
 */
static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))

	ret = wl1271_ps_elp_wakeup(wl);

	wl->ops->sched_scan_stop(wl, wlvif);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold op: push the fragmentation threshold to
 * firmware via ACX, logging (not propagating loudly) failures.
 * NOTE(review): excerpt is missing original lines (goto out paths, return)
 * — only visible statements remain.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
	struct wl1271 *wl = hw->priv;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {

	ret = wl1271_ps_elp_wakeup(wl);

	ret = wl1271_acx_frag_threshold(wl, value);
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold op: the RTS threshold is per-role in this
 * firmware, so apply it to every registered vif.
 * NOTE(review): excerpt is missing original lines (goto out paths, return)
 * — only visible statements remain.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {

	ret = wl1271_ps_elp_wakeup(wl);

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
			wl1271_warning("set rts threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
/*
 * Remove the first information element with id @eid from the frame in
 * @skb (IEs start at @ieoffset) by shifting the tail down and trimming.
 * NOTE(review): excerpt is missing original lines (len/next computation,
 * NULL check) — only visible statements remain.
 */
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
					skb->len - ieoffset);
	/* close the gap left by the removed IE, then shrink the skb */
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
/*
 * Remove the first vendor-specific IE matching @oui/@oui_type from the
 * frame in @skb, same mechanics as wl12xx_remove_ie().
 * NOTE(review): excerpt is missing original lines (len/next computation,
 * NULL check) — only visible statements remain.
 */
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
				    unsigned int oui, u8 oui_type,
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					       skb->data + ieoffset,
					       skb->len - ieoffset);
	/* close the gap left by the removed IE, then shrink the skb */
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
/*
 * Upload the AP probe-response template built by mac80211 into firmware
 * and mark it as explicitly set (so beacon-derived templates won't
 * overwrite it).
 * NOTE(review): excerpt is missing original lines (skb NULL check,
 * template args, kfree_skb, return) — only visible statements remain.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;

	skb = ieee80211_proberesp_get(wl->hw, vif);

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,

	wl1271_debug(DEBUG_AP, "probe response updated");
	/* flag consulted by wlcore_set_beacon_template() */
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template upload: if the vif has no SSID recorded,
 * splice the SSID from bss_conf into the supplied probe-response data
 * (replacing the existing SSID IE) before uploading the template.
 * NOTE(review): excerpt is missing original lines (parameter list tail,
 * ptr declaration/NULL check, template-set arguments, returns) — only
 * visible statements remain.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						size_t probe_rsp_len,
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,

	/* spliced template must still fit the firmware template buffer */
	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
		wl1271_error("No SSID in beacon!");

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the existing SSID IE (header + payload) */
	ptr += (ptr[1] + 2);

	/* everything before the SSID IE is copied verbatim */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append the remainder of the original frame after the old SSID */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to firmware: slot time, preamble
 * length and CTS protection, each only when flagged in @changed.
 * NOTE(review): excerpt is missing original lines (else keywords, ret
 * checks, CTSPROTECT_ENABLE argument, return) — only visible statements
 * remain.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
			wl1271_warning("Set slot time failed %d", ret);

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
			wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Build and upload the beacon template from mac80211's beacon, then (for
 * AP vifs without an explicit probe-response template) derive a
 * probe-response template from the same beacon: strip TIM and P2P IEs,
 * rewrite the frame control to PROBE_RESP, and upload it.
 * NOTE(review): excerpt is missing original lines (beacon NULL check,
 * ret checks/gotos, template-set arguments, is_ap branch heads, return) —
 * only visible statements remain.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
		dev_kfree_skb(beacon);
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
		dev_kfree_skb(beacon);

	/* WMM is enabled iff the beacon carries the Microsoft WMM IE */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the (modified) beacon into a probe response frame */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
	dev_kfree_skb(beacon);
/*
 * Apply beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the AP probe-response template, and rebuild the beacon template,
 * each gated on the corresponding BSS_CHANGED_* bit.
 * NOTE(review): excerpt is missing original lines (ret checks/gotos,
 * return) — only visible statements remain.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);

		wl1271_error("beacon info change failed: %d", ret);
4130 /* AP mode changes */
4131 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
4132 struct ieee80211_vif
*vif
,
4133 struct ieee80211_bss_conf
*bss_conf
,
4136 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4139 if (changed
& BSS_CHANGED_BASIC_RATES
) {
4140 u32 rates
= bss_conf
->basic_rates
;
4142 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
4144 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
4145 wlvif
->basic_rate_set
);
4147 ret
= wl1271_init_ap_rates(wl
, wlvif
);
4149 wl1271_error("AP rate policy change failed %d", ret
);
4153 ret
= wl1271_ap_init_templates(wl
, vif
);
4157 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
4161 ret
= wlcore_set_beacon_template(wl
, vif
, true);
4166 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
4170 if (changed
& BSS_CHANGED_BEACON_ENABLED
) {
4171 if (bss_conf
->enable_beacon
) {
4172 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4173 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
4177 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
4181 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4182 wl1271_debug(DEBUG_AP
, "started AP");
4185 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4187 * AP might be in ROC in case we have just
4188 * sent auth reply. handle it.
4190 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4191 wl12xx_croc(wl
, wlvif
->role_id
);
4193 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
4197 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4198 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
4200 wl1271_debug(DEBUG_AP
, "stopped AP");
4205 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4209 /* Handle HT information change */
4210 if ((changed
& BSS_CHANGED_HT
) &&
4211 (bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
)) {
4212 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4213 bss_conf
->ht_operation_mode
);
4215 wl1271_warning("Set ht information failed %d", ret
);
4224 static int wlcore_set_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4225 struct ieee80211_bss_conf
*bss_conf
,
4231 wl1271_debug(DEBUG_MAC80211
,
4232 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4233 bss_conf
->bssid
, bss_conf
->aid
,
4234 bss_conf
->beacon_int
,
4235 bss_conf
->basic_rates
, sta_rate_set
);
4237 wlvif
->beacon_int
= bss_conf
->beacon_int
;
4238 rates
= bss_conf
->basic_rates
;
4239 wlvif
->basic_rate_set
=
4240 wl1271_tx_enabled_rates_get(wl
, rates
,
4243 wl1271_tx_min_rate_get(wl
,
4244 wlvif
->basic_rate_set
);
4248 wl1271_tx_enabled_rates_get(wl
,
4252 /* we only support sched_scan while not connected */
4253 if (wl
->sched_vif
== wlvif
)
4254 wl
->ops
->sched_scan_stop(wl
, wlvif
);
4256 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4260 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
4264 ret
= wl1271_build_qos_null_data(wl
, wl12xx_wlvif_to_vif(wlvif
));
4268 wlcore_set_ssid(wl
, wlvif
);
4270 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4275 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4279 /* revert back to minimum rates for the current band */
4280 wl1271_set_band_rate(wl
, wlvif
);
4281 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4283 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4287 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4288 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4289 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4294 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4297 /* STA/IBSS mode changes */
4298 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4299 struct ieee80211_vif
*vif
,
4300 struct ieee80211_bss_conf
*bss_conf
,
4303 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4304 bool do_join
= false;
4305 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4306 bool ibss_joined
= false;
4307 u32 sta_rate_set
= 0;
4309 struct ieee80211_sta
*sta
;
4310 bool sta_exists
= false;
4311 struct ieee80211_sta_ht_cap sta_ht_cap
;
4314 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4320 if (changed
& BSS_CHANGED_IBSS
) {
4321 if (bss_conf
->ibss_joined
) {
4322 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4325 wlcore_unset_assoc(wl
, wlvif
);
4326 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4330 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4333 /* Need to update the SSID (for filtering etc) */
4334 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4337 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4338 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4339 bss_conf
->enable_beacon
? "enabled" : "disabled");
4344 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
)
4345 wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
4347 if (changed
& BSS_CHANGED_CQM
) {
4348 bool enable
= false;
4349 if (bss_conf
->cqm_rssi_thold
)
4351 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4352 bss_conf
->cqm_rssi_thold
,
4353 bss_conf
->cqm_rssi_hyst
);
4356 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4359 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4360 BSS_CHANGED_ASSOC
)) {
4362 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4364 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4366 /* save the supp_rates of the ap */
4367 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4368 if (sta
->ht_cap
.ht_supported
)
4370 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4371 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4372 sta_ht_cap
= sta
->ht_cap
;
4379 if (changed
& BSS_CHANGED_BSSID
) {
4380 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4381 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4386 /* Need to update the BSSID (for filtering etc) */
4389 ret
= wlcore_clear_bssid(wl
, wlvif
);
4395 if (changed
& BSS_CHANGED_IBSS
) {
4396 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4397 bss_conf
->ibss_joined
);
4399 if (bss_conf
->ibss_joined
) {
4400 u32 rates
= bss_conf
->basic_rates
;
4401 wlvif
->basic_rate_set
=
4402 wl1271_tx_enabled_rates_get(wl
, rates
,
4405 wl1271_tx_min_rate_get(wl
,
4406 wlvif
->basic_rate_set
);
4408 /* by default, use 11b + OFDM rates */
4409 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4410 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4416 if ((changed
& BSS_CHANGED_BEACON_INFO
) && bss_conf
->dtim_period
) {
4417 /* enable beacon filtering */
4418 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
4423 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4428 ret
= wlcore_join(wl
, wlvif
);
4430 wl1271_warning("cmd join failed %d", ret
);
4435 if (changed
& BSS_CHANGED_ASSOC
) {
4436 if (bss_conf
->assoc
) {
4437 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4442 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4443 wl12xx_set_authorized(wl
, wlvif
);
4445 wlcore_unset_assoc(wl
, wlvif
);
4449 if (changed
& BSS_CHANGED_PS
) {
4450 if ((bss_conf
->ps
) &&
4451 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4452 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4456 if (wl
->conf
.conn
.forced_ps
) {
4457 ps_mode
= STATION_POWER_SAVE_MODE
;
4458 ps_mode_str
= "forced";
4460 ps_mode
= STATION_AUTO_PS_MODE
;
4461 ps_mode_str
= "auto";
4464 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4466 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4468 wl1271_warning("enter %s ps failed %d",
4470 } else if (!bss_conf
->ps
&&
4471 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4472 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4474 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4475 STATION_ACTIVE_MODE
);
4477 wl1271_warning("exit auto ps failed %d", ret
);
4481 /* Handle new association with HT. Do this after join. */
4484 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4486 ret
= wlcore_hw_set_peer_cap(wl
,
4492 wl1271_warning("Set ht cap failed %d", ret
);
4498 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4499 bss_conf
->ht_operation_mode
);
4501 wl1271_warning("Set ht information failed %d",
4508 /* Handle arp filtering. Done after join. */
4509 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4510 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4511 __be32 addr
= bss_conf
->arp_addr_list
[0];
4512 wlvif
->sta
.qos
= bss_conf
->qos
;
4513 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4515 if (bss_conf
->arp_addr_cnt
== 1 && bss_conf
->assoc
) {
4516 wlvif
->ip_addr
= addr
;
4518 * The template should have been configured only upon
4519 * association. however, it seems that the correct ip
4520 * isn't being set (when sending), so we have to
4521 * reconfigure the template upon every ip change.
4523 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4525 wl1271_warning("build arp rsp failed: %d", ret
);
4529 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4530 (ACX_ARP_FILTER_ARP_FILTERING
|
4531 ACX_ARP_FILTER_AUTO_ARP
),
4535 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4546 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4547 struct ieee80211_vif
*vif
,
4548 struct ieee80211_bss_conf
*bss_conf
,
4551 struct wl1271
*wl
= hw
->priv
;
4552 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4553 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4556 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4557 wlvif
->role_id
, (int)changed
);
4560 * make sure to cancel pending disconnections if our association
4563 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4564 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4566 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4567 !bss_conf
->enable_beacon
)
4568 wl1271_tx_flush(wl
);
4570 mutex_lock(&wl
->mutex
);
4572 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4575 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4578 ret
= wl1271_ps_elp_wakeup(wl
);
4582 if ((changed
& BSS_CHANGED_TXPOWER
) &&
4583 bss_conf
->txpower
!= wlvif
->power_level
) {
4585 ret
= wl1271_acx_tx_power(wl
, wlvif
, bss_conf
->txpower
);
4589 wlvif
->power_level
= bss_conf
->txpower
;
4593 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4595 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4597 wl1271_ps_elp_sleep(wl
);
4600 mutex_unlock(&wl
->mutex
);
4603 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4604 struct ieee80211_chanctx_conf
*ctx
)
4606 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4607 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4608 cfg80211_get_chandef_type(&ctx
->def
));
4612 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4613 struct ieee80211_chanctx_conf
*ctx
)
4615 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4616 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4617 cfg80211_get_chandef_type(&ctx
->def
));
4620 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4621 struct ieee80211_chanctx_conf
*ctx
,
4624 wl1271_debug(DEBUG_MAC80211
,
4625 "mac80211 change chanctx %d (type %d) changed 0x%x",
4626 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4627 cfg80211_get_chandef_type(&ctx
->def
), changed
);
4630 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4631 struct ieee80211_vif
*vif
,
4632 struct ieee80211_chanctx_conf
*ctx
)
4634 struct wl1271
*wl
= hw
->priv
;
4635 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4636 int channel
= ieee80211_frequency_to_channel(
4637 ctx
->def
.chan
->center_freq
);
4639 wl1271_debug(DEBUG_MAC80211
,
4640 "mac80211 assign chanctx (role %d) %d (type %d)",
4641 wlvif
->role_id
, channel
, cfg80211_get_chandef_type(&ctx
->def
));
4643 mutex_lock(&wl
->mutex
);
4645 wlvif
->band
= ctx
->def
.chan
->band
;
4646 wlvif
->channel
= channel
;
4647 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4649 /* update default rates according to the band */
4650 wl1271_set_band_rate(wl
, wlvif
);
4652 mutex_unlock(&wl
->mutex
);
4657 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4658 struct ieee80211_vif
*vif
,
4659 struct ieee80211_chanctx_conf
*ctx
)
4661 struct wl1271
*wl
= hw
->priv
;
4662 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4664 wl1271_debug(DEBUG_MAC80211
,
4665 "mac80211 unassign chanctx (role %d) %d (type %d)",
4667 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4668 cfg80211_get_chandef_type(&ctx
->def
));
4670 wl1271_tx_flush(wl
);
4673 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4674 struct ieee80211_vif
*vif
, u16 queue
,
4675 const struct ieee80211_tx_queue_params
*params
)
4677 struct wl1271
*wl
= hw
->priv
;
4678 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4682 mutex_lock(&wl
->mutex
);
4684 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4687 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4689 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4691 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4694 ret
= wl1271_ps_elp_wakeup(wl
);
4699 * the txop is confed in units of 32us by the mac80211,
4702 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4703 params
->cw_min
, params
->cw_max
,
4704 params
->aifs
, params
->txop
<< 5);
4708 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4709 CONF_CHANNEL_TYPE_EDCF
,
4710 wl1271_tx_get_queue(queue
),
4711 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4715 wl1271_ps_elp_sleep(wl
);
4718 mutex_unlock(&wl
->mutex
);
4723 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4724 struct ieee80211_vif
*vif
)
4727 struct wl1271
*wl
= hw
->priv
;
4728 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4729 u64 mactime
= ULLONG_MAX
;
4732 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4734 mutex_lock(&wl
->mutex
);
4736 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4739 ret
= wl1271_ps_elp_wakeup(wl
);
4743 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4748 wl1271_ps_elp_sleep(wl
);
4751 mutex_unlock(&wl
->mutex
);
4755 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4756 struct survey_info
*survey
)
4758 struct ieee80211_conf
*conf
= &hw
->conf
;
4763 survey
->channel
= conf
->chandef
.chan
;
4768 static int wl1271_allocate_sta(struct wl1271
*wl
,
4769 struct wl12xx_vif
*wlvif
,
4770 struct ieee80211_sta
*sta
)
4772 struct wl1271_station
*wl_sta
;
4776 if (wl
->active_sta_count
>= wl
->max_ap_stations
) {
4777 wl1271_warning("could not allocate HLID - too much stations");
4781 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4782 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4784 wl1271_warning("could not allocate HLID - too many links");
4788 /* use the previous security seq, if this is a recovery/resume */
4789 wl
->links
[wl_sta
->hlid
].total_freed_pkts
= wl_sta
->total_freed_pkts
;
4791 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4792 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4793 wl
->active_sta_count
++;
4797 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4799 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4802 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4803 __clear_bit(hlid
, &wl
->ap_ps_map
);
4804 __clear_bit(hlid
, &wl
->ap_fw_ps_map
);
4807 * save the last used PN in the private part of iee80211_sta,
4808 * in case of recovery/suspend
4810 wlcore_save_freed_pkts_addr(wl
, wlvif
, hlid
, wl
->links
[hlid
].addr
);
4812 wl12xx_free_link(wl
, wlvif
, &hlid
);
4813 wl
->active_sta_count
--;
4816 * rearm the tx watchdog when the last STA is freed - give the FW a
4817 * chance to return STA-buffered packets before complaining.
4819 if (wl
->active_sta_count
== 0)
4820 wl12xx_rearm_tx_watchdog_locked(wl
);
4823 static int wl12xx_sta_add(struct wl1271
*wl
,
4824 struct wl12xx_vif
*wlvif
,
4825 struct ieee80211_sta
*sta
)
4827 struct wl1271_station
*wl_sta
;
4831 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4833 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4837 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4838 hlid
= wl_sta
->hlid
;
4840 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4842 wl1271_free_sta(wl
, wlvif
, hlid
);
4847 static int wl12xx_sta_remove(struct wl1271
*wl
,
4848 struct wl12xx_vif
*wlvif
,
4849 struct ieee80211_sta
*sta
)
4851 struct wl1271_station
*wl_sta
;
4854 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4856 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4858 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4861 ret
= wl12xx_cmd_remove_peer(wl
, wlvif
, wl_sta
->hlid
);
4865 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4869 static void wlcore_roc_if_possible(struct wl1271
*wl
,
4870 struct wl12xx_vif
*wlvif
)
4872 if (find_first_bit(wl
->roc_map
,
4873 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
4876 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
4879 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
4883 * when wl_sta is NULL, we treat this call as if coming from a
4884 * pending auth reply.
4885 * wl->mutex must be taken and the FW must be awake when the call
4888 void wlcore_update_inconn_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4889 struct wl1271_station
*wl_sta
, bool in_conn
)
4892 if (WARN_ON(wl_sta
&& wl_sta
->in_connection
))
4895 if (!wlvif
->ap_pending_auth_reply
&&
4896 !wlvif
->inconn_count
)
4897 wlcore_roc_if_possible(wl
, wlvif
);
4900 wl_sta
->in_connection
= true;
4901 wlvif
->inconn_count
++;
4903 wlvif
->ap_pending_auth_reply
= true;
4906 if (wl_sta
&& !wl_sta
->in_connection
)
4909 if (WARN_ON(!wl_sta
&& !wlvif
->ap_pending_auth_reply
))
4912 if (WARN_ON(wl_sta
&& !wlvif
->inconn_count
))
4916 wl_sta
->in_connection
= false;
4917 wlvif
->inconn_count
--;
4919 wlvif
->ap_pending_auth_reply
= false;
4922 if (!wlvif
->inconn_count
&& !wlvif
->ap_pending_auth_reply
&&
4923 test_bit(wlvif
->role_id
, wl
->roc_map
))
4924 wl12xx_croc(wl
, wlvif
->role_id
);
4928 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4929 struct wl12xx_vif
*wlvif
,
4930 struct ieee80211_sta
*sta
,
4931 enum ieee80211_sta_state old_state
,
4932 enum ieee80211_sta_state new_state
)
4934 struct wl1271_station
*wl_sta
;
4935 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4936 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4939 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4941 /* Add station (AP mode) */
4943 old_state
== IEEE80211_STA_NOTEXIST
&&
4944 new_state
== IEEE80211_STA_NONE
) {
4945 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
4949 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
4952 /* Remove station (AP mode) */
4954 old_state
== IEEE80211_STA_NONE
&&
4955 new_state
== IEEE80211_STA_NOTEXIST
) {
4957 wl12xx_sta_remove(wl
, wlvif
, sta
);
4959 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4962 /* Authorize station (AP mode) */
4964 new_state
== IEEE80211_STA_AUTHORIZED
) {
4965 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wl_sta
->hlid
);
4969 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4974 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4977 /* Authorize station */
4979 new_state
== IEEE80211_STA_AUTHORIZED
) {
4980 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4981 ret
= wl12xx_set_authorized(wl
, wlvif
);
4987 old_state
== IEEE80211_STA_AUTHORIZED
&&
4988 new_state
== IEEE80211_STA_ASSOC
) {
4989 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4990 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
4993 /* save seq number on disassoc (suspend) */
4995 old_state
== IEEE80211_STA_ASSOC
&&
4996 new_state
== IEEE80211_STA_AUTH
) {
4997 wlcore_save_freed_pkts(wl
, wlvif
, wlvif
->sta
.hlid
, sta
);
4998 wlvif
->total_freed_pkts
= 0;
5001 /* restore seq number on assoc (resume) */
5003 old_state
== IEEE80211_STA_AUTH
&&
5004 new_state
== IEEE80211_STA_ASSOC
) {
5005 wlvif
->total_freed_pkts
= wl_sta
->total_freed_pkts
;
5008 /* clear ROCs on failure or authorization */
5010 (new_state
== IEEE80211_STA_AUTHORIZED
||
5011 new_state
== IEEE80211_STA_NOTEXIST
)) {
5012 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
5013 wl12xx_croc(wl
, wlvif
->role_id
);
5017 old_state
== IEEE80211_STA_NOTEXIST
&&
5018 new_state
== IEEE80211_STA_NONE
) {
5019 if (find_first_bit(wl
->roc_map
,
5020 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
5021 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
5022 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
5023 wlvif
->band
, wlvif
->channel
);
5029 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
5030 struct ieee80211_vif
*vif
,
5031 struct ieee80211_sta
*sta
,
5032 enum ieee80211_sta_state old_state
,
5033 enum ieee80211_sta_state new_state
)
5035 struct wl1271
*wl
= hw
->priv
;
5036 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5039 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
5040 sta
->aid
, old_state
, new_state
);
5042 mutex_lock(&wl
->mutex
);
5044 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5049 ret
= wl1271_ps_elp_wakeup(wl
);
5053 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
5055 wl1271_ps_elp_sleep(wl
);
5057 mutex_unlock(&wl
->mutex
);
5058 if (new_state
< old_state
)
5063 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
5064 struct ieee80211_vif
*vif
,
5065 enum ieee80211_ampdu_mlme_action action
,
5066 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
5069 struct wl1271
*wl
= hw
->priv
;
5070 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5072 u8 hlid
, *ba_bitmap
;
5074 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
5077 /* sanity check - the fields in FW are only 8bits wide */
5078 if (WARN_ON(tid
> 0xFF))
5081 mutex_lock(&wl
->mutex
);
5083 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5088 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
5089 hlid
= wlvif
->sta
.hlid
;
5090 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
5091 struct wl1271_station
*wl_sta
;
5093 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5094 hlid
= wl_sta
->hlid
;
5100 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
5102 ret
= wl1271_ps_elp_wakeup(wl
);
5106 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
5110 case IEEE80211_AMPDU_RX_START
:
5111 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
5116 if (wl
->ba_rx_session_count
>= wl
->ba_rx_session_count_max
) {
5118 wl1271_error("exceeded max RX BA sessions");
5122 if (*ba_bitmap
& BIT(tid
)) {
5124 wl1271_error("cannot enable RX BA session on active "
5129 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
5132 *ba_bitmap
|= BIT(tid
);
5133 wl
->ba_rx_session_count
++;
5137 case IEEE80211_AMPDU_RX_STOP
:
5138 if (!(*ba_bitmap
& BIT(tid
))) {
5140 * this happens on reconfig - so only output a debug
5141 * message for now, and don't fail the function.
5143 wl1271_debug(DEBUG_MAC80211
,
5144 "no active RX BA session on tid: %d",
5150 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
5153 *ba_bitmap
&= ~BIT(tid
);
5154 wl
->ba_rx_session_count
--;
5159 * The BA initiator session management in FW independently.
5160 * Falling break here on purpose for all TX APDU commands.
5162 case IEEE80211_AMPDU_TX_START
:
5163 case IEEE80211_AMPDU_TX_STOP_CONT
:
5164 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
5165 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
5166 case IEEE80211_AMPDU_TX_OPERATIONAL
:
5171 wl1271_error("Incorrect ampdu action id=%x\n", action
);
5175 wl1271_ps_elp_sleep(wl
);
5178 mutex_unlock(&wl
->mutex
);
5183 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
5184 struct ieee80211_vif
*vif
,
5185 const struct cfg80211_bitrate_mask
*mask
)
5187 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5188 struct wl1271
*wl
= hw
->priv
;
5191 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
5192 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
5193 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
5195 mutex_lock(&wl
->mutex
);
5197 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
5198 wlvif
->bitrate_masks
[i
] =
5199 wl1271_tx_enabled_rates_get(wl
,
5200 mask
->control
[i
].legacy
,
5203 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5206 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
5207 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5209 ret
= wl1271_ps_elp_wakeup(wl
);
5213 wl1271_set_band_rate(wl
, wlvif
);
5215 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
5216 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
5218 wl1271_ps_elp_sleep(wl
);
5221 mutex_unlock(&wl
->mutex
);
5226 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
5227 struct ieee80211_vif
*vif
,
5228 struct ieee80211_channel_switch
*ch_switch
)
5230 struct wl1271
*wl
= hw
->priv
;
5231 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5234 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
5236 wl1271_tx_flush(wl
);
5238 mutex_lock(&wl
->mutex
);
5240 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
5241 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
5242 ieee80211_chswitch_done(vif
, false);
5244 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5248 ret
= wl1271_ps_elp_wakeup(wl
);
5252 /* TODO: change mac80211 to pass vif as param */
5254 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5255 unsigned long delay_usec
;
5257 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
5261 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5263 /* indicate failure 5 seconds after channel switch time */
5264 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
5266 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
5267 usecs_to_jiffies(delay_usec
) +
5268 msecs_to_jiffies(5000));
5272 wl1271_ps_elp_sleep(wl
);
5275 mutex_unlock(&wl
->mutex
);
5278 static void wlcore_op_flush(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
5279 u32 queues
, bool drop
)
5281 struct wl1271
*wl
= hw
->priv
;
5283 wl1271_tx_flush(wl
);
5286 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
5287 struct ieee80211_vif
*vif
,
5288 struct ieee80211_channel
*chan
,
5290 enum ieee80211_roc_type type
)
5292 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5293 struct wl1271
*wl
= hw
->priv
;
5294 int channel
, ret
= 0;
5296 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
5298 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
5299 channel
, wlvif
->role_id
);
5301 mutex_lock(&wl
->mutex
);
5303 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5306 /* return EBUSY if we can't ROC right now */
5307 if (WARN_ON(wl
->roc_vif
||
5308 find_first_bit(wl
->roc_map
,
5309 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)) {
5314 ret
= wl1271_ps_elp_wakeup(wl
);
5318 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
5323 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
5324 msecs_to_jiffies(duration
));
5326 wl1271_ps_elp_sleep(wl
);
5328 mutex_unlock(&wl
->mutex
);
5332 static int __wlcore_roc_completed(struct wl1271
*wl
)
5334 struct wl12xx_vif
*wlvif
;
5337 /* already completed */
5338 if (unlikely(!wl
->roc_vif
))
5341 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5343 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5346 ret
= wl12xx_stop_dev(wl
, wlvif
);
5355 static int wlcore_roc_completed(struct wl1271
*wl
)
5359 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5361 mutex_lock(&wl
->mutex
);
5363 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5368 ret
= wl1271_ps_elp_wakeup(wl
);
5372 ret
= __wlcore_roc_completed(wl
);
5374 wl1271_ps_elp_sleep(wl
);
5376 mutex_unlock(&wl
->mutex
);
5381 static void wlcore_roc_complete_work(struct work_struct
*work
)
5383 struct delayed_work
*dwork
;
5387 dwork
= container_of(work
, struct delayed_work
, work
);
5388 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5390 ret
= wlcore_roc_completed(wl
);
5392 ieee80211_remain_on_channel_expired(wl
->hw
);
5395 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5397 struct wl1271
*wl
= hw
->priv
;
5399 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5402 wl1271_tx_flush(wl
);
5405 * we can't just flush_work here, because it might deadlock
5406 * (as we might get called from the same workqueue)
5408 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5409 wlcore_roc_completed(wl
);
5414 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5415 struct ieee80211_vif
*vif
,
5416 struct ieee80211_sta
*sta
,
5419 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5421 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta_rc_update");
5423 if (!(changed
& IEEE80211_RC_BW_CHANGED
))
5426 /* this callback is atomic, so schedule a new work */
5427 wlvif
->rc_update_bw
= sta
->bandwidth
;
5428 ieee80211_queue_work(hw
, &wlvif
->rc_update_work
);
5431 static int wlcore_op_get_rssi(struct ieee80211_hw
*hw
,
5432 struct ieee80211_vif
*vif
,
5433 struct ieee80211_sta
*sta
,
5436 struct wl1271
*wl
= hw
->priv
;
5437 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5440 wl1271_debug(DEBUG_MAC80211
, "mac80211 get_rssi");
5442 mutex_lock(&wl
->mutex
);
5444 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5447 ret
= wl1271_ps_elp_wakeup(wl
);
5451 ret
= wlcore_acx_average_rssi(wl
, wlvif
, rssi_dbm
);
5456 wl1271_ps_elp_sleep(wl
);
5459 mutex_unlock(&wl
->mutex
);
5464 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5466 struct wl1271
*wl
= hw
->priv
;
5469 mutex_lock(&wl
->mutex
);
5471 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5474 /* packets are considered pending if in the TX queue or the FW */
5475 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5477 mutex_unlock(&wl
->mutex
);
5482 /* can't be const, mac80211 writes to this */
5483 static struct ieee80211_rate wl1271_rates
[] = {
5485 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5486 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5488 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5489 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5490 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5492 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5493 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5494 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5496 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5497 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5498 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5500 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5501 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5503 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5504 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5506 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5507 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5509 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5510 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5512 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5513 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5515 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5516 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5518 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5519 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5521 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5522 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5525 /* can't be const, mac80211 writes to this */
5526 static struct ieee80211_channel wl1271_channels
[] = {
5527 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5528 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5529 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5530 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5531 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5532 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5533 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5534 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5535 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5536 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5537 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5538 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5539 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5540 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5543 /* can't be const, mac80211 writes to this */
5544 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5545 .channels
= wl1271_channels
,
5546 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5547 .bitrates
= wl1271_rates
,
5548 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5551 /* 5 GHz data rates for WL1273 */
5552 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5554 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5555 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5557 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5558 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5560 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5561 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5563 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5564 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5566 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5567 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5569 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5570 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5572 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5573 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5575 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5576 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5579 /* 5 GHz band channels for WL1273 */
5580 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
5581 { .hw_value
= 8, .center_freq
= 5040, .max_power
= WLCORE_MAX_TXPWR
},
5582 { .hw_value
= 12, .center_freq
= 5060, .max_power
= WLCORE_MAX_TXPWR
},
5583 { .hw_value
= 16, .center_freq
= 5080, .max_power
= WLCORE_MAX_TXPWR
},
5584 { .hw_value
= 34, .center_freq
= 5170, .max_power
= WLCORE_MAX_TXPWR
},
5585 { .hw_value
= 36, .center_freq
= 5180, .max_power
= WLCORE_MAX_TXPWR
},
5586 { .hw_value
= 38, .center_freq
= 5190, .max_power
= WLCORE_MAX_TXPWR
},
5587 { .hw_value
= 40, .center_freq
= 5200, .max_power
= WLCORE_MAX_TXPWR
},
5588 { .hw_value
= 42, .center_freq
= 5210, .max_power
= WLCORE_MAX_TXPWR
},
5589 { .hw_value
= 44, .center_freq
= 5220, .max_power
= WLCORE_MAX_TXPWR
},
5590 { .hw_value
= 46, .center_freq
= 5230, .max_power
= WLCORE_MAX_TXPWR
},
5591 { .hw_value
= 48, .center_freq
= 5240, .max_power
= WLCORE_MAX_TXPWR
},
5592 { .hw_value
= 52, .center_freq
= 5260, .max_power
= WLCORE_MAX_TXPWR
},
5593 { .hw_value
= 56, .center_freq
= 5280, .max_power
= WLCORE_MAX_TXPWR
},
5594 { .hw_value
= 60, .center_freq
= 5300, .max_power
= WLCORE_MAX_TXPWR
},
5595 { .hw_value
= 64, .center_freq
= 5320, .max_power
= WLCORE_MAX_TXPWR
},
5596 { .hw_value
= 100, .center_freq
= 5500, .max_power
= WLCORE_MAX_TXPWR
},
5597 { .hw_value
= 104, .center_freq
= 5520, .max_power
= WLCORE_MAX_TXPWR
},
5598 { .hw_value
= 108, .center_freq
= 5540, .max_power
= WLCORE_MAX_TXPWR
},
5599 { .hw_value
= 112, .center_freq
= 5560, .max_power
= WLCORE_MAX_TXPWR
},
5600 { .hw_value
= 116, .center_freq
= 5580, .max_power
= WLCORE_MAX_TXPWR
},
5601 { .hw_value
= 120, .center_freq
= 5600, .max_power
= WLCORE_MAX_TXPWR
},
5602 { .hw_value
= 124, .center_freq
= 5620, .max_power
= WLCORE_MAX_TXPWR
},
5603 { .hw_value
= 128, .center_freq
= 5640, .max_power
= WLCORE_MAX_TXPWR
},
5604 { .hw_value
= 132, .center_freq
= 5660, .max_power
= WLCORE_MAX_TXPWR
},
5605 { .hw_value
= 136, .center_freq
= 5680, .max_power
= WLCORE_MAX_TXPWR
},
5606 { .hw_value
= 140, .center_freq
= 5700, .max_power
= WLCORE_MAX_TXPWR
},
5607 { .hw_value
= 149, .center_freq
= 5745, .max_power
= WLCORE_MAX_TXPWR
},
5608 { .hw_value
= 153, .center_freq
= 5765, .max_power
= WLCORE_MAX_TXPWR
},
5609 { .hw_value
= 157, .center_freq
= 5785, .max_power
= WLCORE_MAX_TXPWR
},
5610 { .hw_value
= 161, .center_freq
= 5805, .max_power
= WLCORE_MAX_TXPWR
},
5611 { .hw_value
= 165, .center_freq
= 5825, .max_power
= WLCORE_MAX_TXPWR
},
5614 static struct ieee80211_supported_band wl1271_band_5ghz
= {
5615 .channels
= wl1271_channels_5ghz
,
5616 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
5617 .bitrates
= wl1271_rates_5ghz
,
5618 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
5621 static const struct ieee80211_ops wl1271_ops
= {
5622 .start
= wl1271_op_start
,
5623 .stop
= wlcore_op_stop
,
5624 .add_interface
= wl1271_op_add_interface
,
5625 .remove_interface
= wl1271_op_remove_interface
,
5626 .change_interface
= wl12xx_op_change_interface
,
5628 .suspend
= wl1271_op_suspend
,
5629 .resume
= wl1271_op_resume
,
5631 .config
= wl1271_op_config
,
5632 .prepare_multicast
= wl1271_op_prepare_multicast
,
5633 .configure_filter
= wl1271_op_configure_filter
,
5635 .set_key
= wlcore_op_set_key
,
5636 .hw_scan
= wl1271_op_hw_scan
,
5637 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
5638 .sched_scan_start
= wl1271_op_sched_scan_start
,
5639 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
5640 .bss_info_changed
= wl1271_op_bss_info_changed
,
5641 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
5642 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
5643 .conf_tx
= wl1271_op_conf_tx
,
5644 .get_tsf
= wl1271_op_get_tsf
,
5645 .get_survey
= wl1271_op_get_survey
,
5646 .sta_state
= wl12xx_op_sta_state
,
5647 .ampdu_action
= wl1271_op_ampdu_action
,
5648 .tx_frames_pending
= wl1271_tx_frames_pending
,
5649 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
5650 .set_default_unicast_key
= wl1271_op_set_default_key_idx
,
5651 .channel_switch
= wl12xx_op_channel_switch
,
5652 .flush
= wlcore_op_flush
,
5653 .remain_on_channel
= wlcore_op_remain_on_channel
,
5654 .cancel_remain_on_channel
= wlcore_op_cancel_remain_on_channel
,
5655 .add_chanctx
= wlcore_op_add_chanctx
,
5656 .remove_chanctx
= wlcore_op_remove_chanctx
,
5657 .change_chanctx
= wlcore_op_change_chanctx
,
5658 .assign_vif_chanctx
= wlcore_op_assign_vif_chanctx
,
5659 .unassign_vif_chanctx
= wlcore_op_unassign_vif_chanctx
,
5660 .sta_rc_update
= wlcore_op_sta_rc_update
,
5661 .get_rssi
= wlcore_op_get_rssi
,
5662 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
5666 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
5672 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5673 wl1271_error("Illegal RX rate from HW: %d", rate
);
5677 idx
= wl
->band_rate_to_idx
[band
][rate
];
5678 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5679 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5686 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5690 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5693 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5694 wl1271_warning("NIC part of the MAC address wraps around!");
5696 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5697 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5698 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5699 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5700 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5701 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5702 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5706 /* we may be one address short at the most */
5707 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5710 * turn on the LAA bit in the first address and use it as
5713 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5714 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5715 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5716 sizeof(wl
->addresses
[0]));
5718 wl
->addresses
[idx
].addr
[0] |= BIT(1);
5721 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5722 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5725 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5729 ret
= wl12xx_set_power_on(wl
);
5733 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5737 wl
->fuse_oui_addr
= 0;
5738 wl
->fuse_nic_addr
= 0;
5740 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5744 if (wl
->ops
->get_mac
)
5745 ret
= wl
->ops
->get_mac(wl
);
5748 wl1271_power_off(wl
);
5752 static int wl1271_register_hw(struct wl1271
*wl
)
5755 u32 oui_addr
= 0, nic_addr
= 0;
5757 if (wl
->mac80211_registered
)
5760 if (wl
->nvs_len
>= 12) {
5761 /* NOTE: The wl->nvs->nvs element must be first, in
5762 * order to simplify the casting, we assume it is at
5763 * the beginning of the wl->nvs structure.
5765 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5768 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5770 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5773 /* if the MAC address is zeroed in the NVS derive from fuse */
5774 if (oui_addr
== 0 && nic_addr
== 0) {
5775 oui_addr
= wl
->fuse_oui_addr
;
5776 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5777 nic_addr
= wl
->fuse_nic_addr
+ 1;
5780 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5782 ret
= ieee80211_register_hw(wl
->hw
);
5784 wl1271_error("unable to register mac80211 hw: %d", ret
);
5788 wl
->mac80211_registered
= true;
5790 wl1271_debugfs_init(wl
);
5792 wl1271_notice("loaded");
5798 static void wl1271_unregister_hw(struct wl1271
*wl
)
5801 wl1271_plt_stop(wl
);
5803 ieee80211_unregister_hw(wl
->hw
);
5804 wl
->mac80211_registered
= false;
5808 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5811 static const u32 cipher_suites
[] = {
5812 WLAN_CIPHER_SUITE_WEP40
,
5813 WLAN_CIPHER_SUITE_WEP104
,
5814 WLAN_CIPHER_SUITE_TKIP
,
5815 WLAN_CIPHER_SUITE_CCMP
,
5816 WL1271_CIPHER_SUITE_GEM
,
5819 /* The tx descriptor buffer */
5820 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5822 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5823 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5826 /* FIXME: find a proper value */
5827 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5829 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5830 IEEE80211_HW_SUPPORTS_PS
|
5831 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5832 IEEE80211_HW_SUPPORTS_UAPSD
|
5833 IEEE80211_HW_HAS_RATE_CONTROL
|
5834 IEEE80211_HW_CONNECTION_MONITOR
|
5835 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5836 IEEE80211_HW_SPECTRUM_MGMT
|
5837 IEEE80211_HW_AP_LINK_PS
|
5838 IEEE80211_HW_AMPDU_AGGREGATION
|
5839 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5840 IEEE80211_HW_QUEUE_CONTROL
|
5841 IEEE80211_HW_CHANCTX_STA_CSA
;
5843 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5844 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5846 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5847 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5848 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5849 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5850 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5851 wl
->hw
->wiphy
->max_match_sets
= 16;
5853 * Maximum length of elements in scanning probe request templates
5854 * should be the maximum length possible for a template, without
5855 * the IEEE80211 header of the template
5857 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5858 sizeof(struct ieee80211_header
);
5860 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5861 sizeof(struct ieee80211_header
);
5863 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 30000;
5865 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5866 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
|
5867 WIPHY_FLAG_SUPPORTS_SCHED_SCAN
;
5869 /* make sure all our channels fit in the scanned_ch bitmask */
5870 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5871 ARRAY_SIZE(wl1271_channels_5ghz
) >
5872 WL1271_MAX_CHANNELS
);
5874 * clear channel flags from the previous usage
5875 * and restore max_power & max_antenna_gain values.
5877 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
5878 wl1271_band_2ghz
.channels
[i
].flags
= 0;
5879 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5880 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
5883 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
5884 wl1271_band_5ghz
.channels
[i
].flags
= 0;
5885 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5886 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
5890 * We keep local copies of the band structs because we need to
5891 * modify them on a per-device basis.
5893 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5894 sizeof(wl1271_band_2ghz
));
5895 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5896 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5897 sizeof(*wl
->ht_cap
));
5898 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5899 sizeof(wl1271_band_5ghz
));
5900 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5901 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5902 sizeof(*wl
->ht_cap
));
5904 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5905 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5906 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5907 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5910 * allow 4 queues per mac address we support +
5911 * 1 cab queue per mac + one global offchannel Tx queue
5913 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
5915 /* the last queue is the offchannel queue */
5916 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
5917 wl
->hw
->max_rates
= 1;
5919 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5921 /* the FW answers probe-requests in AP-mode */
5922 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5923 wl
->hw
->wiphy
->probe_resp_offload
=
5924 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5925 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5926 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5928 /* allowed interface combinations */
5929 wl
->hw
->wiphy
->iface_combinations
= wl
->iface_combinations
;
5930 wl
->hw
->wiphy
->n_iface_combinations
= wl
->n_iface_combinations
;
5932 /* register vendor commands */
5933 wlcore_set_vendor_commands(wl
->hw
->wiphy
);
5935 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5937 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5938 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5940 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5945 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
5948 struct ieee80211_hw
*hw
;
5953 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5955 wl1271_error("could not alloc ieee80211_hw");
5961 memset(wl
, 0, sizeof(*wl
));
5963 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5965 wl1271_error("could not alloc wl priv");
5967 goto err_priv_alloc
;
5970 INIT_LIST_HEAD(&wl
->wlvif_list
);
5975 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
5976 * we don't allocate any additional resource here, so that's fine.
5978 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5979 for (j
= 0; j
< WLCORE_MAX_LINKS
; j
++)
5980 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5982 skb_queue_head_init(&wl
->deferred_rx_queue
);
5983 skb_queue_head_init(&wl
->deferred_tx_queue
);
5985 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5986 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5987 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5988 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5989 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5990 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
5991 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5993 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5994 if (!wl
->freezable_wq
) {
6001 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
6002 wl
->band
= IEEE80211_BAND_2GHZ
;
6003 wl
->channel_type
= NL80211_CHAN_NO_HT
;
6005 wl
->sg_enabled
= true;
6006 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
6007 wl
->recovery_count
= 0;
6010 wl
->ap_fw_ps_map
= 0;
6012 wl
->platform_quirks
= 0;
6013 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
6014 wl
->active_sta_count
= 0;
6015 wl
->active_link_count
= 0;
6017 init_waitqueue_head(&wl
->fwlog_waitq
);
6019 /* The system link is always allocated */
6020 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
6022 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
6023 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
6024 wl
->tx_frames
[i
] = NULL
;
6026 spin_lock_init(&wl
->wl_lock
);
6028 wl
->state
= WLCORE_STATE_OFF
;
6029 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6030 mutex_init(&wl
->mutex
);
6031 mutex_init(&wl
->flush_mutex
);
6032 init_completion(&wl
->nvs_loading_complete
);
6034 order
= get_order(aggr_buf_size
);
6035 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
6036 if (!wl
->aggr_buf
) {
6040 wl
->aggr_buf_size
= aggr_buf_size
;
6042 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
6043 if (!wl
->dummy_packet
) {
6048 /* Allocate one page for the FW log */
6049 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
6052 goto err_dummy_packet
;
6055 wl
->mbox_size
= mbox_size
;
6056 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
6062 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
6063 if (!wl
->buffer_32
) {
6074 free_page((unsigned long)wl
->fwlog
);
6077 dev_kfree_skb(wl
->dummy_packet
);
6080 free_pages((unsigned long)wl
->aggr_buf
, order
);
6083 destroy_workqueue(wl
->freezable_wq
);
6086 wl1271_debugfs_exit(wl
);
6090 ieee80211_free_hw(hw
);
6094 return ERR_PTR(ret
);
6096 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
6098 int wlcore_free_hw(struct wl1271
*wl
)
6100 /* Unblock any fwlog readers */
6101 mutex_lock(&wl
->mutex
);
6102 wl
->fwlog_size
= -1;
6103 wake_up_interruptible_all(&wl
->fwlog_waitq
);
6104 mutex_unlock(&wl
->mutex
);
6106 wlcore_sysfs_free(wl
);
6108 kfree(wl
->buffer_32
);
6110 free_page((unsigned long)wl
->fwlog
);
6111 dev_kfree_skb(wl
->dummy_packet
);
6112 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
6114 wl1271_debugfs_exit(wl
);
6118 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6122 kfree(wl
->raw_fw_status
);
6123 kfree(wl
->fw_status
);
6124 kfree(wl
->tx_res_if
);
6125 destroy_workqueue(wl
->freezable_wq
);
6128 ieee80211_free_hw(wl
->hw
);
6132 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
#ifdef CONFIG_PM
/* Wake-on-WLAN capabilities advertised when power stays on in suspend */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6143 static irqreturn_t
wlcore_hardirq(int irq
, void *cookie
)
6145 return IRQ_WAKE_THREAD
;
6148 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
6150 struct wl1271
*wl
= context
;
6151 struct platform_device
*pdev
= wl
->pdev
;
6152 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6153 struct wl12xx_platform_data
*pdata
= pdev_data
->pdata
;
6154 unsigned long irqflags
;
6156 irq_handler_t hardirq_fn
= NULL
;
6159 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
6161 wl1271_error("Could not allocate nvs data");
6164 wl
->nvs_len
= fw
->size
;
6166 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
6172 ret
= wl
->ops
->setup(wl
);
6176 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
6178 /* adjust some runtime configuration parameters */
6179 wlcore_adjust_conf(wl
);
6181 wl
->irq
= platform_get_irq(pdev
, 0);
6182 wl
->platform_quirks
= pdata
->platform_quirks
;
6183 wl
->if_ops
= pdev_data
->if_ops
;
6185 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
) {
6186 irqflags
= IRQF_TRIGGER_RISING
;
6187 hardirq_fn
= wlcore_hardirq
;
6189 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
6192 ret
= request_threaded_irq(wl
->irq
, hardirq_fn
, wlcore_irq
,
6193 irqflags
, pdev
->name
, wl
);
6195 wl1271_error("request_irq() failed: %d", ret
);
6200 ret
= enable_irq_wake(wl
->irq
);
6202 wl
->irq_wake_enabled
= true;
6203 device_init_wakeup(wl
->dev
, 1);
6204 if (pdata
->pwr_in_suspend
)
6205 wl
->hw
->wiphy
->wowlan
= &wlcore_wowlan_support
;
6208 disable_irq(wl
->irq
);
6210 ret
= wl12xx_get_hw_info(wl
);
6212 wl1271_error("couldn't get hw info");
6216 ret
= wl
->ops
->identify_chip(wl
);
6220 ret
= wl1271_init_ieee80211(wl
);
6224 ret
= wl1271_register_hw(wl
);
6228 ret
= wlcore_sysfs_init(wl
);
6232 wl
->initialized
= true;
6236 wl1271_unregister_hw(wl
);
6239 free_irq(wl
->irq
, wl
);
6245 release_firmware(fw
);
6246 complete_all(&wl
->nvs_loading_complete
);
6249 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
6253 if (!wl
->ops
|| !wl
->ptable
)
6256 wl
->dev
= &pdev
->dev
;
6258 platform_set_drvdata(pdev
, wl
);
6260 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6261 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
6264 wl1271_error("request_firmware_nowait failed: %d", ret
);
6265 complete_all(&wl
->nvs_loading_complete
);
6270 EXPORT_SYMBOL_GPL(wlcore_probe
);
6272 int wlcore_remove(struct platform_device
*pdev
)
6274 struct wl1271
*wl
= platform_get_drvdata(pdev
);
6276 wait_for_completion(&wl
->nvs_loading_complete
);
6277 if (!wl
->initialized
)
6280 if (wl
->irq_wake_enabled
) {
6281 device_init_wakeup(wl
->dev
, 0);
6282 disable_irq_wake(wl
->irq
);
6284 wl1271_unregister_hw(wl
);
6285 free_irq(wl
->irq
, wl
);
6290 EXPORT_SYMBOL_GPL(wlcore_remove
);
6292 u32 wl12xx_debug_level
= DEBUG_NONE
;
6293 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
6294 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
6295 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
6297 module_param_named(fwlog
, fwlog_param
, charp
, 0);
6298 MODULE_PARM_DESC(fwlog
,
6299 "FW logger options: continuous, ondemand, dbgpins or disable");
6301 module_param(fwlog_mem_blocks
, int, S_IRUSR
| S_IWUSR
);
6302 MODULE_PARM_DESC(fwlog_mem_blocks
, "fwlog mem_blocks");
6304 module_param(bug_on_recovery
, int, S_IRUSR
| S_IWUSR
);
6305 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
6307 module_param(no_recovery
, int, S_IRUSR
| S_IWUSR
);
6308 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
6310 MODULE_LICENSE("GPL");
6311 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6312 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6313 MODULE_FIRMWARE(WL12XX_NVS_NAME
);