3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
44 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters; the sentinel values (NULL / -1) mean
 * "not set on the command line, keep the per-chip default" and are
 * checked by wlcore_adjust_conf().
 */
static char *fwlog_param;		/* fwlog mode: continuous/ondemand/dbgpins/disable */
static int fwlog_mem_blocks = -1;	/* number of FW memory blocks for the logger */
static int bug_on_recovery = -1;	/* BUG() instead of recovering (debug aid) */
static int no_recovery = -1;		/* leave the FW stuck instead of recovering */
51 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
52 struct ieee80211_vif
*vif
,
53 bool reset_tx_queues
);
54 static void wlcore_op_stop_locked(struct wl1271
*wl
);
55 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
57 static int wl12xx_set_authorized(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
61 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
70 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
74 wl1271_info("Association completed.");
78 static void wl1271_reg_notify(struct wiphy
*wiphy
,
79 struct regulatory_request
*request
)
81 struct ieee80211_supported_band
*band
;
82 struct ieee80211_channel
*ch
;
84 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
85 struct wl1271
*wl
= hw
->priv
;
87 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
88 for (i
= 0; i
< band
->n_channels
; i
++) {
89 ch
= &band
->channels
[i
];
90 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
93 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
94 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
95 IEEE80211_CHAN_PASSIVE_SCAN
;
99 wlcore_regdomain_config(wl
);
102 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
107 /* we should hold wl->mutex */
108 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
124 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
127 int period
= wl
->conf
.rx_streaming
.interval
;
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
133 /* reconfigure/disable according to new streaming_period */
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
136 (wl
->conf
.rx_streaming
.always
||
137 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
138 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
140 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif
->rx_streaming_timer
);
148 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
151 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
152 rx_streaming_enable_work
);
153 struct wl1271
*wl
= wlvif
->wl
;
155 mutex_lock(&wl
->mutex
);
157 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
158 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
159 (!wl
->conf
.rx_streaming
.always
&&
160 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
163 if (!wl
->conf
.rx_streaming
.interval
)
166 ret
= wl1271_ps_elp_wakeup(wl
);
170 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
174 /* stop it after some time of inactivity */
175 mod_timer(&wlvif
->rx_streaming_timer
,
176 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
179 wl1271_ps_elp_sleep(wl
);
181 mutex_unlock(&wl
->mutex
);
184 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
187 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
188 rx_streaming_disable_work
);
189 struct wl1271
*wl
= wlvif
->wl
;
191 mutex_lock(&wl
->mutex
);
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
196 ret
= wl1271_ps_elp_wakeup(wl
);
200 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
205 wl1271_ps_elp_sleep(wl
);
207 mutex_unlock(&wl
->mutex
);
210 static void wl1271_rx_streaming_timer(unsigned long data
)
212 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
213 struct wl1271
*wl
= wlvif
->wl
;
214 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
220 /* if the watchdog is not armed, don't do anything */
221 if (wl
->tx_allocated_blocks
== 0)
224 cancel_delayed_work(&wl
->tx_watchdog_work
);
225 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
226 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
229 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
231 struct delayed_work
*dwork
;
234 dwork
= container_of(work
, struct delayed_work
, work
);
235 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
237 mutex_lock(&wl
->mutex
);
239 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
242 /* Tx went out in the meantime - everything is ok */
243 if (unlikely(wl
->tx_allocated_blocks
== 0))
247 * if a ROC is in progress, we might not have any Tx for a long
248 * time (e.g. pending Tx on the non-ROC channels)
250 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
251 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
252 wl
->conf
.tx
.tx_watchdog_timeout
);
253 wl12xx_rearm_tx_watchdog_locked(wl
);
258 * if a scan is in progress, we might not have any Tx for a long
261 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
262 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
263 wl
->conf
.tx
.tx_watchdog_timeout
);
264 wl12xx_rearm_tx_watchdog_locked(wl
);
269 * AP might cache a frame for a long time for a sleeping station,
270 * so rearm the timer if there's an AP interface with stations. If
271 * Tx is genuinely stuck we will most hopefully discover it when all
272 * stations are removed due to inactivity.
274 if (wl
->active_sta_count
) {
275 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
277 wl
->conf
.tx
.tx_watchdog_timeout
,
278 wl
->active_sta_count
);
279 wl12xx_rearm_tx_watchdog_locked(wl
);
283 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 wl
->conf
.tx
.tx_watchdog_timeout
);
285 wl12xx_queue_recovery_work(wl
);
288 mutex_unlock(&wl
->mutex
);
291 static void wlcore_adjust_conf(struct wl1271
*wl
)
293 /* Adjust settings according to optional module parameters */
295 /* Firmware Logger params */
296 if (fwlog_mem_blocks
!= -1) {
297 if (fwlog_mem_blocks
>= CONF_FWLOG_MIN_MEM_BLOCKS
&&
298 fwlog_mem_blocks
<= CONF_FWLOG_MAX_MEM_BLOCKS
) {
299 wl
->conf
.fwlog
.mem_blocks
= fwlog_mem_blocks
;
302 "Illegal fwlog_mem_blocks=%d using default %d",
303 fwlog_mem_blocks
, wl
->conf
.fwlog
.mem_blocks
);
308 if (!strcmp(fwlog_param
, "continuous")) {
309 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
310 } else if (!strcmp(fwlog_param
, "ondemand")) {
311 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
312 } else if (!strcmp(fwlog_param
, "dbgpins")) {
313 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
314 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
315 } else if (!strcmp(fwlog_param
, "disable")) {
316 wl
->conf
.fwlog
.mem_blocks
= 0;
317 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 if (bug_on_recovery
!= -1)
324 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
326 if (no_recovery
!= -1)
327 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
330 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
331 struct wl12xx_vif
*wlvif
,
336 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
342 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
343 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
354 else if (wl
->active_link_count
> 3 && fw_ps
&&
355 tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
356 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
359 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
360 struct wl12xx_vif
*wlvif
,
361 struct wl_fw_status_2
*status
)
366 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
367 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
368 wl1271_debug(DEBUG_PSM
,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
371 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
373 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
376 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
)
377 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
378 wl
->links
[hlid
].allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
392 struct wl1271_link
*lnk
;
394 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
395 sizeof(*status_2
) + wl
->fw_status_priv_len
;
397 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
402 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
403 "drv_rx_counter = %d, tx_results_counter = %d)",
405 status_1
->fw_rx_counter
,
406 status_1
->drv_rx_counter
,
407 status_1
->tx_results_counter
);
409 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
410 /* prevent wrap-around in freed-packets counter */
411 wl
->tx_allocated_pkts
[i
] -=
412 (status_2
->counters
.tx_released_pkts
[i
] -
413 wl
->tx_pkts_freed
[i
]) & 0xff;
415 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
419 for_each_set_bit(i
, wl
->links_map
, WL12XX_MAX_LINKS
) {
423 /* prevent wrap-around in freed-packets counter */
424 diff
= (status_2
->counters
.tx_lnk_free_pkts
[i
] -
425 lnk
->prev_freed_pkts
) & 0xff;
430 lnk
->allocated_pkts
-= diff
;
431 lnk
->prev_freed_pkts
= status_2
->counters
.tx_lnk_free_pkts
[i
];
433 /* accumulate the prev_freed_pkts counter */
434 lnk
->total_freed_pkts
+= diff
;
437 /* prevent wrap-around in total blocks counter */
438 if (likely(wl
->tx_blocks_freed
<=
439 le32_to_cpu(status_2
->total_released_blks
)))
440 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
443 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
444 le32_to_cpu(status_2
->total_released_blks
);
446 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
448 wl
->tx_allocated_blocks
-= freed_blocks
;
451 * If the FW freed some blocks:
452 * If we still have allocated blocks - re-arm the timer, Tx is
453 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 if (wl
->tx_allocated_blocks
)
457 wl12xx_rearm_tx_watchdog_locked(wl
);
459 cancel_delayed_work(&wl
->tx_watchdog_work
);
462 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
465 * The FW might change the total number of TX memblocks before
466 * we get a notification about blocks being released. Thus, the
467 * available blocks calculation might yield a temporary result
468 * which is lower than the actual available blocks. Keeping in
469 * mind that only blocks that were allocated can be moved from
470 * TX to RX, tx_blocks_available should never decrease here.
472 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
475 /* if more blocks are available now, tx work can be scheduled */
476 if (wl
->tx_blocks_available
> old_tx_blk_count
)
477 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
479 /* for AP update num of allocated TX blocks per link and ps status */
480 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
481 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
484 /* update the host-chipset time offset */
486 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
487 (s64
)le32_to_cpu(status_2
->fw_localtime
);
489 wl
->fw_fast_lnk_map
= le32_to_cpu(status_2
->link_fast_bitmap
);
494 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
498 /* Pass all received frames to the network stack */
499 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
500 ieee80211_rx_ni(wl
->hw
, skb
);
502 /* Return sent skbs to the network stack */
503 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
504 ieee80211_tx_status_ni(wl
->hw
, skb
);
507 static void wl1271_netstack_work(struct work_struct
*work
)
510 container_of(work
, struct wl1271
, netstack_work
);
513 wl1271_flush_deferred_work(wl
);
514 } while (skb_queue_len(&wl
->deferred_rx_queue
));
517 #define WL1271_IRQ_MAX_LOOPS 256
519 static int wlcore_irq_locked(struct wl1271
*wl
)
523 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
525 unsigned int defer_count
;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
535 wl1271_debug(DEBUG_IRQ
, "IRQ work");
537 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
540 ret
= wl1271_ps_elp_wakeup(wl
);
544 while (!done
&& loopcount
--) {
546 * In order to avoid a race with the hardirq, clear the flag
547 * before acknowledging the chip. Since the mutex is held,
548 * wl1271_ps_elp_wakeup cannot be called concurrently.
550 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
551 smp_mb__after_clear_bit();
553 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
557 wlcore_hw_tx_immediate_compl(wl
);
559 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
560 intr
&= WLCORE_ALL_INTR_MASK
;
566 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
567 wl1271_error("HW watchdog interrupt received! starting recovery.");
568 wl
->watchdog_recovery
= true;
571 /* restarting the chip. ignore any other interrupt. */
575 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
576 wl1271_error("SW watchdog interrupt received! "
577 "starting recovery.");
578 wl
->watchdog_recovery
= true;
581 /* restarting the chip. ignore any other interrupt. */
585 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
586 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
588 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
592 /* Check if any tx blocks were freed */
593 spin_lock_irqsave(&wl
->wl_lock
, flags
);
594 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
595 wl1271_tx_total_queue_count(wl
) > 0) {
596 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
598 * In order to avoid starvation of the TX path,
599 * call the work function directly.
601 ret
= wlcore_tx_work_locked(wl
);
605 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
608 /* check for tx results */
609 ret
= wlcore_hw_tx_delayed_compl(wl
);
613 /* Make sure the deferred queues don't get too long */
614 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
615 skb_queue_len(&wl
->deferred_rx_queue
);
616 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
617 wl1271_flush_deferred_work(wl
);
620 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
621 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
622 ret
= wl1271_event_handle(wl
, 0);
627 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
628 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
629 ret
= wl1271_event_handle(wl
, 1);
634 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
635 wl1271_debug(DEBUG_IRQ
,
636 "WL1271_ACX_INTR_INIT_COMPLETE");
638 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
639 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
642 wl1271_ps_elp_sleep(wl
);
648 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
652 struct wl1271
*wl
= cookie
;
654 /* complete the ELP completion */
655 spin_lock_irqsave(&wl
->wl_lock
, flags
);
656 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
658 complete(wl
->elp_compl
);
659 wl
->elp_compl
= NULL
;
662 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
663 /* don't enqueue a work right now. mark it as pending */
664 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
665 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
666 disable_irq_nosync(wl
->irq
);
667 pm_wakeup_event(wl
->dev
, 0);
668 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
671 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
673 /* TX might be handled here, avoid redundant work */
674 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
675 cancel_work_sync(&wl
->tx_work
);
677 mutex_lock(&wl
->mutex
);
679 ret
= wlcore_irq_locked(wl
);
681 wl12xx_queue_recovery_work(wl
);
683 spin_lock_irqsave(&wl
->wl_lock
, flags
);
684 /* In case TX was not handled here, queue TX work */
685 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
686 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
687 wl1271_tx_total_queue_count(wl
) > 0)
688 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
689 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
691 mutex_unlock(&wl
->mutex
);
696 struct vif_counter_data
{
699 struct ieee80211_vif
*cur_vif
;
700 bool cur_vif_running
;
703 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
704 struct ieee80211_vif
*vif
)
706 struct vif_counter_data
*counter
= data
;
709 if (counter
->cur_vif
== vif
)
710 counter
->cur_vif_running
= true;
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
715 struct ieee80211_vif
*cur_vif
,
716 struct vif_counter_data
*data
)
718 memset(data
, 0, sizeof(*data
));
719 data
->cur_vif
= cur_vif
;
721 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
722 wl12xx_vif_count_iter
, data
);
725 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
727 const struct firmware
*fw
;
729 enum wl12xx_fw_type fw_type
;
733 fw_type
= WL12XX_FW_TYPE_PLT
;
734 fw_name
= wl
->plt_fw_name
;
737 * we can't call wl12xx_get_vif_count() here because
738 * wl->mutex is taken, so use the cached last_vif_count value
740 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
741 fw_type
= WL12XX_FW_TYPE_MULTI
;
742 fw_name
= wl
->mr_fw_name
;
744 fw_type
= WL12XX_FW_TYPE_NORMAL
;
745 fw_name
= wl
->sr_fw_name
;
749 if (wl
->fw_type
== fw_type
)
752 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
754 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
757 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
762 wl1271_error("firmware size is not multiple of 32 bits: %zu",
769 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
770 wl
->fw_len
= fw
->size
;
771 wl
->fw
= vmalloc(wl
->fw_len
);
774 wl1271_error("could not allocate memory for the firmware");
779 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
781 wl
->fw_type
= fw_type
;
783 release_firmware(fw
);
788 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
792 /* Avoid a recursive recovery */
793 if (wl
->state
== WLCORE_STATE_ON
) {
794 wl
->state
= WLCORE_STATE_RESTARTING
;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
796 wl1271_ps_elp_wakeup(wl
);
797 wlcore_disable_interrupts_nosync(wl
);
798 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
802 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
806 /* Make sure we have enough room */
807 len
= min(maxlen
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
809 /* Fill the FW log file, consumed by the sysfs fwlog entry */
810 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
811 wl
->fwlog_size
+= len
;
816 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
818 struct wlcore_partition_set part
, old_part
;
825 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
826 (wl
->conf
.fwlog
.mem_blocks
== 0))
829 wl1271_info("Reading FW panic log");
831 block
= kmalloc(wl
->fw_mem_block_size
, GFP_KERNEL
);
836 * Make sure the chip is awake and the logger isn't active.
837 * Do not send a stop fwlog command if the fw is hanged or if
838 * dbgpins are used (due to some fw bug).
840 if (wl1271_ps_elp_wakeup(wl
))
842 if (!wl
->watchdog_recovery
&&
843 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
844 wl12xx_cmd_stop_fwlog(wl
);
846 /* Read the first memory block address */
847 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
851 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
855 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
856 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
857 end_of_log
= wl
->fwlog_end
;
859 offset
= sizeof(addr
);
863 old_part
= wl
->curr_part
;
864 memset(&part
, 0, sizeof(part
));
866 /* Traverse the memory blocks linked list */
868 part
.mem
.start
= wlcore_hw_convert_hwaddr(wl
, addr
);
869 part
.mem
.size
= PAGE_SIZE
;
871 ret
= wlcore_set_partition(wl
, &part
);
873 wl1271_error("%s: set_partition start=0x%X size=%d",
874 __func__
, part
.mem
.start
, part
.mem
.size
);
878 memset(block
, 0, wl
->fw_mem_block_size
);
879 ret
= wlcore_read_hwaddr(wl
, addr
, block
,
880 wl
->fw_mem_block_size
, false);
886 * Memory blocks are linked to one another. The first 4 bytes
887 * of each memory block hold the hardware address of the next
888 * one. The last memory block points to the first one in
889 * on demand mode and is equal to 0x2000000 in continuous mode.
891 addr
= le32_to_cpup((__le32
*)block
);
893 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
894 wl
->fw_mem_block_size
- offset
))
896 } while (addr
&& (addr
!= end_of_log
));
898 wake_up_interruptible(&wl
->fwlog_waitq
);
902 wlcore_set_partition(wl
, &old_part
);
905 static void wlcore_print_recovery(struct wl1271
*wl
)
911 wl1271_info("Hardware recovery in progress. FW ver: %s",
912 wl
->chip
.fw_ver_str
);
914 /* change partitions momentarily so we can read the FW pc */
915 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
919 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
923 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
927 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
928 pc
, hint_sts
, ++wl
->recovery_count
);
930 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
934 static void wl1271_recovery_work(struct work_struct
*work
)
937 container_of(work
, struct wl1271
, recovery_work
);
938 struct wl12xx_vif
*wlvif
;
939 struct ieee80211_vif
*vif
;
941 mutex_lock(&wl
->mutex
);
943 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
946 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
947 if (wl
->conf
.fwlog
.output
== WL12XX_FWLOG_OUTPUT_HOST
)
948 wl12xx_read_fwlog_panic(wl
);
949 wlcore_print_recovery(wl
);
952 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
953 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
955 if (wl
->conf
.recovery
.no_recovery
) {
956 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
960 /* Prevent spurious TX during FW restart */
961 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
963 /* reboot the chipset */
964 while (!list_empty(&wl
->wlvif_list
)) {
965 wlvif
= list_first_entry(&wl
->wlvif_list
,
966 struct wl12xx_vif
, list
);
967 vif
= wl12xx_wlvif_to_vif(wlvif
);
968 __wl1271_op_remove_interface(wl
, vif
, false);
971 wlcore_op_stop_locked(wl
);
973 ieee80211_restart_hw(wl
->hw
);
976 * Its safe to enable TX now - the queues are stopped after a request
979 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
982 wl
->watchdog_recovery
= false;
983 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
984 mutex_unlock(&wl
->mutex
);
987 static int wlcore_fw_wakeup(struct wl1271
*wl
)
989 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
992 static int wl1271_setup(struct wl1271
*wl
)
994 wl
->fw_status_1
= kzalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
995 sizeof(*wl
->fw_status_2
) +
996 wl
->fw_status_priv_len
, GFP_KERNEL
);
997 if (!wl
->fw_status_1
)
1000 wl
->fw_status_2
= (struct wl_fw_status_2
*)
1001 (((u8
*) wl
->fw_status_1
) +
1002 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
1004 wl
->tx_res_if
= kzalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
1005 if (!wl
->tx_res_if
) {
1006 kfree(wl
->fw_status_1
);
1013 static int wl12xx_set_power_on(struct wl1271
*wl
)
1017 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1018 ret
= wl1271_power_on(wl
);
1021 msleep(WL1271_POWER_ON_SLEEP
);
1022 wl1271_io_reset(wl
);
1025 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1029 /* ELP module wake up */
1030 ret
= wlcore_fw_wakeup(wl
);
1038 wl1271_power_off(wl
);
1042 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1046 ret
= wl12xx_set_power_on(wl
);
1051 * For wl127x based devices we could use the default block
1052 * size (512 bytes), but due to a bug in the sdio driver, we
1053 * need to set it explicitly after the chip is powered on. To
1054 * simplify the code and since the performance impact is
1055 * negligible, we use the same block size for all different
1058 * Check if the bus supports blocksize alignment and, if it
1059 * doesn't, make sure we don't have the quirk.
1061 if (!wl1271_set_block_size(wl
))
1062 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1064 /* TODO: make sure the lower driver has set things up correctly */
1066 ret
= wl1271_setup(wl
);
1070 ret
= wl12xx_fetch_firmware(wl
, plt
);
1078 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1080 int retries
= WL1271_BOOT_RETRIES
;
1081 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1083 static const char* const PLT_MODE
[] = {
1092 mutex_lock(&wl
->mutex
);
1094 wl1271_notice("power up");
1096 if (wl
->state
!= WLCORE_STATE_OFF
) {
1097 wl1271_error("cannot go into PLT state because not "
1098 "in off state: %d", wl
->state
);
1103 /* Indicate to lower levels that we are now in PLT mode */
1105 wl
->plt_mode
= plt_mode
;
1109 ret
= wl12xx_chip_wakeup(wl
, true);
1113 if (plt_mode
!= PLT_CHIP_AWAKE
) {
1114 ret
= wl
->ops
->plt_init(wl
);
1119 wl
->state
= WLCORE_STATE_ON
;
1120 wl1271_notice("firmware booted in PLT mode %s (%s)",
1122 wl
->chip
.fw_ver_str
);
1124 /* update hw/fw version info in wiphy struct */
1125 wiphy
->hw_version
= wl
->chip
.id
;
1126 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1127 sizeof(wiphy
->fw_version
));
1132 wl1271_power_off(wl
);
1136 wl
->plt_mode
= PLT_OFF
;
1138 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1139 WL1271_BOOT_RETRIES
);
1141 mutex_unlock(&wl
->mutex
);
1146 int wl1271_plt_stop(struct wl1271
*wl
)
1150 wl1271_notice("power down");
1153 * Interrupts must be disabled before setting the state to OFF.
1154 * Otherwise, the interrupt handler might be called and exit without
1155 * reading the interrupt status.
1157 wlcore_disable_interrupts(wl
);
1158 mutex_lock(&wl
->mutex
);
1160 mutex_unlock(&wl
->mutex
);
1163 * This will not necessarily enable interrupts as interrupts
1164 * may have been disabled when op_stop was called. It will,
1165 * however, balance the above call to disable_interrupts().
1167 wlcore_enable_interrupts(wl
);
1169 wl1271_error("cannot power down because not in PLT "
1170 "state: %d", wl
->state
);
1175 mutex_unlock(&wl
->mutex
);
1177 wl1271_flush_deferred_work(wl
);
1178 cancel_work_sync(&wl
->netstack_work
);
1179 cancel_work_sync(&wl
->recovery_work
);
1180 cancel_delayed_work_sync(&wl
->elp_work
);
1181 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1183 mutex_lock(&wl
->mutex
);
1184 wl1271_power_off(wl
);
1186 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1187 wl
->state
= WLCORE_STATE_OFF
;
1189 wl
->plt_mode
= PLT_OFF
;
1191 mutex_unlock(&wl
->mutex
);
1197 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1198 struct ieee80211_tx_control
*control
,
1199 struct sk_buff
*skb
)
1201 struct wl1271
*wl
= hw
->priv
;
1202 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1203 struct ieee80211_vif
*vif
= info
->control
.vif
;
1204 struct wl12xx_vif
*wlvif
= NULL
;
1205 unsigned long flags
;
1210 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1211 ieee80211_free_txskb(hw
, skb
);
1215 wlvif
= wl12xx_vif_to_data(vif
);
1216 mapping
= skb_get_queue_mapping(skb
);
1217 q
= wl1271_tx_get_queue(mapping
);
1219 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1221 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1224 * drop the packet if the link is invalid or the queue is stopped
1225 * for any reason but watermark. Watermark is a "soft"-stop so we
1226 * allow these packets through.
1228 if (hlid
== WL12XX_INVALID_LINK_ID
||
1229 (!test_bit(hlid
, wlvif
->links_map
)) ||
1230 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1231 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1232 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1233 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1234 ieee80211_free_txskb(hw
, skb
);
1238 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1240 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1242 wl
->tx_queue_count
[q
]++;
1243 wlvif
->tx_queue_count
[q
]++;
1246 * The workqueue is slow to process the tx_queue and we need stop
1247 * the queue here, otherwise the queue will get too long.
1249 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1250 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1251 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1252 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1253 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1254 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1258 * The chip specific setup must run before the first TX packet -
1259 * before that, the tx_work will not be initialized!
1262 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1263 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1264 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1267 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1270 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1272 unsigned long flags
;
1275 /* no need to queue a new dummy packet if one is already pending */
1276 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1279 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1281 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1282 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1283 wl
->tx_queue_count
[q
]++;
1284 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1286 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1287 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1288 return wlcore_tx_work_locked(wl
);
1291 * If the FW TX is busy, TX work will be scheduled by the threaded
1292 * interrupt handler function
1298 * The size of the dummy packet should be at least 1400 bytes. However, in
1299 * order to minimize the number of bus transactions, aligning it to 512 bytes
1300 * boundaries could be beneficial, performance wise
1302 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1304 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1306 struct sk_buff
*skb
;
1307 struct ieee80211_hdr_3addr
*hdr
;
1308 unsigned int dummy_packet_size
;
1310 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1311 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1313 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1315 wl1271_warning("Failed to allocate a dummy packet skb");
1319 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1321 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1322 memset(hdr
, 0, sizeof(*hdr
));
1323 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1324 IEEE80211_STYPE_NULLFUNC
|
1325 IEEE80211_FCTL_TODS
);
1327 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1329 /* Dummy packets require the TID to be management */
1330 skb
->priority
= WL1271_TID_MGMT
;
1332 /* Initialize all fields that might be used */
1333 skb_set_queue_mapping(skb
, 0);
1334 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1342 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern
*p
)
1344 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1345 int i
, pattern_len
= 0;
1348 wl1271_warning("No mask in WoWLAN pattern");
1353 * The pattern is broken up into segments of bytes at different offsets
1354 * that need to be checked by the FW filter. Each segment is called
1355 * a field in the FW API. We verify that the total number of fields
1356 * required for this pattern won't exceed FW limits (8)
1357 * as well as the total fields buffer won't exceed the FW limit.
1358 * Note that if there's a pattern which crosses Ethernet/IP header
1359 * boundary a new field is required.
1361 for (i
= 0; i
< p
->pattern_len
; i
++) {
1362 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1367 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1369 fields_size
+= pattern_len
+
1370 RX_FILTER_FIELD_OVERHEAD
;
1378 fields_size
+= pattern_len
+
1379 RX_FILTER_FIELD_OVERHEAD
;
1386 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1390 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1391 wl1271_warning("RX Filter too complex. Too many segments");
1395 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1396 wl1271_warning("RX filter pattern is too big");
1403 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1405 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1408 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1415 for (i
= 0; i
< filter
->num_fields
; i
++)
1416 kfree(filter
->fields
[i
].pattern
);
1421 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1422 u16 offset
, u8 flags
,
1423 u8
*pattern
, u8 len
)
1425 struct wl12xx_rx_filter_field
*field
;
1427 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1428 wl1271_warning("Max fields per RX filter. can't alloc another");
1432 field
= &filter
->fields
[filter
->num_fields
];
1434 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1435 if (!field
->pattern
) {
1436 wl1271_warning("Failed to allocate RX filter pattern");
1440 filter
->num_fields
++;
1442 field
->offset
= cpu_to_le16(offset
);
1443 field
->flags
= flags
;
1445 memcpy(field
->pattern
, pattern
, len
);
1450 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1452 int i
, fields_size
= 0;
1454 for (i
= 0; i
< filter
->num_fields
; i
++)
1455 fields_size
+= filter
->fields
[i
].len
+
1456 sizeof(struct wl12xx_rx_filter_field
) -
1462 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1466 struct wl12xx_rx_filter_field
*field
;
1468 for (i
= 0; i
< filter
->num_fields
; i
++) {
1469 field
= (struct wl12xx_rx_filter_field
*)buf
;
1471 field
->offset
= filter
->fields
[i
].offset
;
1472 field
->flags
= filter
->fields
[i
].flags
;
1473 field
->len
= filter
->fields
[i
].len
;
1475 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1476 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1477 sizeof(u8
*) + field
->len
;
1482 * Allocates an RX filter returned through f
1483 * which needs to be freed using rx_filter_free()
1486 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern
*p
,
1487 struct wl12xx_rx_filter
**f
)
1490 struct wl12xx_rx_filter
*filter
;
1494 filter
= wl1271_rx_filter_alloc();
1496 wl1271_warning("Failed to alloc rx filter");
1502 while (i
< p
->pattern_len
) {
1503 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1508 for (j
= i
; j
< p
->pattern_len
; j
++) {
1509 if (!test_bit(j
, (unsigned long *)p
->mask
))
1512 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1513 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1517 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1519 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1521 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1522 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1527 ret
= wl1271_rx_filter_alloc_field(filter
,
1530 &p
->pattern
[i
], len
);
1537 filter
->action
= FILTER_SIGNAL
;
1543 wl1271_rx_filter_free(filter
);
1549 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1550 struct cfg80211_wowlan
*wow
)
1554 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1555 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1560 ret
= wl1271_rx_filter_clear_all(wl
);
1567 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1570 /* Validate all incoming patterns before clearing current FW state */
1571 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1572 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1574 wl1271_warning("Bad wowlan pattern %d", i
);
1579 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1583 ret
= wl1271_rx_filter_clear_all(wl
);
1587 /* Translate WoWLAN patterns into filters */
1588 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1589 struct cfg80211_pkt_pattern
*p
;
1590 struct wl12xx_rx_filter
*filter
= NULL
;
1592 p
= &wow
->patterns
[i
];
1594 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1596 wl1271_warning("Failed to create an RX filter from "
1597 "wowlan pattern %d", i
);
1601 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1603 wl1271_rx_filter_free(filter
);
1608 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1614 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1615 struct wl12xx_vif
*wlvif
,
1616 struct cfg80211_wowlan
*wow
)
1620 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1623 ret
= wl1271_ps_elp_wakeup(wl
);
1627 ret
= wl1271_configure_wowlan(wl
, wow
);
1631 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1632 wl
->conf
.conn
.wake_up_event
) &&
1633 (wl
->conf
.conn
.suspend_listen_interval
==
1634 wl
->conf
.conn
.listen_interval
))
1637 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1638 wl
->conf
.conn
.suspend_wake_up_event
,
1639 wl
->conf
.conn
.suspend_listen_interval
);
1642 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1645 wl1271_ps_elp_sleep(wl
);
1651 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1652 struct wl12xx_vif
*wlvif
)
1656 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1659 ret
= wl1271_ps_elp_wakeup(wl
);
1663 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1665 wl1271_ps_elp_sleep(wl
);
1671 static int wl1271_configure_suspend(struct wl1271
*wl
,
1672 struct wl12xx_vif
*wlvif
,
1673 struct cfg80211_wowlan
*wow
)
1675 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1676 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1677 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1678 return wl1271_configure_suspend_ap(wl
, wlvif
);
1682 static void wl1271_configure_resume(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1685 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1686 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1688 if ((!is_ap
) && (!is_sta
))
1691 if (is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1694 ret
= wl1271_ps_elp_wakeup(wl
);
1699 wl1271_configure_wowlan(wl
, NULL
);
1701 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1702 wl
->conf
.conn
.wake_up_event
) &&
1703 (wl
->conf
.conn
.suspend_listen_interval
==
1704 wl
->conf
.conn
.listen_interval
))
1707 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1708 wl
->conf
.conn
.wake_up_event
,
1709 wl
->conf
.conn
.listen_interval
);
1712 wl1271_error("resume: wake up conditions failed: %d",
1716 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1720 wl1271_ps_elp_sleep(wl
);
1723 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1724 struct cfg80211_wowlan
*wow
)
1726 struct wl1271
*wl
= hw
->priv
;
1727 struct wl12xx_vif
*wlvif
;
1730 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1733 /* we want to perform the recovery before suspending */
1734 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1735 wl1271_warning("postponing suspend to perform recovery");
1739 wl1271_tx_flush(wl
);
1741 mutex_lock(&wl
->mutex
);
1742 wl
->wow_enabled
= true;
1743 wl12xx_for_each_wlvif(wl
, wlvif
) {
1744 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1746 mutex_unlock(&wl
->mutex
);
1747 wl1271_warning("couldn't prepare device to suspend");
1751 mutex_unlock(&wl
->mutex
);
1752 /* flush any remaining work */
1753 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1756 * disable and re-enable interrupts in order to flush
1759 wlcore_disable_interrupts(wl
);
1762 * set suspended flag to avoid triggering a new threaded_irq
1763 * work. no need for spinlock as interrupts are disabled.
1765 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1767 wlcore_enable_interrupts(wl
);
1768 flush_work(&wl
->tx_work
);
1769 flush_delayed_work(&wl
->elp_work
);
1774 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1776 struct wl1271
*wl
= hw
->priv
;
1777 struct wl12xx_vif
*wlvif
;
1778 unsigned long flags
;
1779 bool run_irq_work
= false, pending_recovery
;
1782 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1784 WARN_ON(!wl
->wow_enabled
);
1787 * re-enable irq_work enqueuing, and call irq_work directly if
1788 * there is a pending work.
1790 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1791 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1792 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1793 run_irq_work
= true;
1794 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1796 mutex_lock(&wl
->mutex
);
1798 /* test the recovery flag before calling any SDIO functions */
1799 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1803 wl1271_debug(DEBUG_MAC80211
,
1804 "run postponed irq_work directly");
1806 /* don't talk to the HW if recovery is pending */
1807 if (!pending_recovery
) {
1808 ret
= wlcore_irq_locked(wl
);
1810 wl12xx_queue_recovery_work(wl
);
1813 wlcore_enable_interrupts(wl
);
1816 if (pending_recovery
) {
1817 wl1271_warning("queuing forgotten recovery on resume");
1818 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1822 wl12xx_for_each_wlvif(wl
, wlvif
) {
1823 wl1271_configure_resume(wl
, wlvif
);
1827 wl
->wow_enabled
= false;
1828 mutex_unlock(&wl
->mutex
);
1834 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1836 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1839 * We have to delay the booting of the hardware because
1840 * we need to know the local MAC address before downloading and
1841 * initializing the firmware. The MAC address cannot be changed
1842 * after boot, and without the proper MAC address, the firmware
1843 * will not function properly.
1845 * The MAC address is first known when the corresponding interface
1846 * is added. That is where we will initialize the hardware.
1852 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1856 if (wl
->state
== WLCORE_STATE_OFF
) {
1857 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1859 wlcore_enable_interrupts(wl
);
1865 * this must be before the cancel_work calls below, so that the work
1866 * functions don't perform further work.
1868 wl
->state
= WLCORE_STATE_OFF
;
1871 * Use the nosync variant to disable interrupts, so the mutex could be
1872 * held while doing so without deadlocking.
1874 wlcore_disable_interrupts_nosync(wl
);
1876 mutex_unlock(&wl
->mutex
);
1878 wlcore_synchronize_interrupts(wl
);
1879 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1880 cancel_work_sync(&wl
->recovery_work
);
1881 wl1271_flush_deferred_work(wl
);
1882 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1883 cancel_work_sync(&wl
->netstack_work
);
1884 cancel_work_sync(&wl
->tx_work
);
1885 cancel_delayed_work_sync(&wl
->elp_work
);
1886 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1888 /* let's notify MAC80211 about the remaining pending TX frames */
1889 mutex_lock(&wl
->mutex
);
1890 wl12xx_tx_reset(wl
);
1892 wl1271_power_off(wl
);
1894 * In case a recovery was scheduled, interrupts were disabled to avoid
1895 * an interrupt storm. Now that the power is down, it is safe to
1896 * re-enable interrupts to balance the disable depth
1898 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1899 wlcore_enable_interrupts(wl
);
1901 wl
->band
= IEEE80211_BAND_2GHZ
;
1904 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1905 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1906 wl
->tx_blocks_available
= 0;
1907 wl
->tx_allocated_blocks
= 0;
1908 wl
->tx_results_count
= 0;
1909 wl
->tx_packets_count
= 0;
1910 wl
->time_offset
= 0;
1911 wl
->ap_fw_ps_map
= 0;
1913 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1914 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1915 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1916 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1917 memset(wl
->session_ids
, 0, sizeof(wl
->session_ids
));
1918 wl
->active_sta_count
= 0;
1919 wl
->active_link_count
= 0;
1921 /* The system link is always allocated */
1922 wl
->links
[WL12XX_SYSTEM_HLID
].allocated_pkts
= 0;
1923 wl
->links
[WL12XX_SYSTEM_HLID
].prev_freed_pkts
= 0;
1924 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1927 * this is performed after the cancel_work calls and the associated
1928 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1929 * get executed before all these vars have been reset.
1933 wl
->tx_blocks_freed
= 0;
1935 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1936 wl
->tx_pkts_freed
[i
] = 0;
1937 wl
->tx_allocated_pkts
[i
] = 0;
1940 wl1271_debugfs_reset(wl
);
1942 kfree(wl
->fw_status_1
);
1943 wl
->fw_status_1
= NULL
;
1944 wl
->fw_status_2
= NULL
;
1945 kfree(wl
->tx_res_if
);
1946 wl
->tx_res_if
= NULL
;
1947 kfree(wl
->target_mem_map
);
1948 wl
->target_mem_map
= NULL
;
1951 * FW channels must be re-calibrated after recovery,
1952 * save current Reg-Domain channel configuration and clear it.
1954 memcpy(wl
->reg_ch_conf_pending
, wl
->reg_ch_conf_last
,
1955 sizeof(wl
->reg_ch_conf_pending
));
1956 memset(wl
->reg_ch_conf_last
, 0, sizeof(wl
->reg_ch_conf_last
));
1959 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1961 struct wl1271
*wl
= hw
->priv
;
1963 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1965 mutex_lock(&wl
->mutex
);
1967 wlcore_op_stop_locked(wl
);
1969 mutex_unlock(&wl
->mutex
);
1972 static void wlcore_channel_switch_work(struct work_struct
*work
)
1974 struct delayed_work
*dwork
;
1976 struct ieee80211_vif
*vif
;
1977 struct wl12xx_vif
*wlvif
;
1980 dwork
= container_of(work
, struct delayed_work
, work
);
1981 wlvif
= container_of(dwork
, struct wl12xx_vif
, channel_switch_work
);
1984 wl1271_info("channel switch failed (role_id: %d).", wlvif
->role_id
);
1986 mutex_lock(&wl
->mutex
);
1988 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
1991 /* check the channel switch is still ongoing */
1992 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
))
1995 vif
= wl12xx_wlvif_to_vif(wlvif
);
1996 ieee80211_chswitch_done(vif
, false);
1998 ret
= wl1271_ps_elp_wakeup(wl
);
2002 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2004 wl1271_ps_elp_sleep(wl
);
2006 mutex_unlock(&wl
->mutex
);
2009 static void wlcore_connection_loss_work(struct work_struct
*work
)
2011 struct delayed_work
*dwork
;
2013 struct ieee80211_vif
*vif
;
2014 struct wl12xx_vif
*wlvif
;
2016 dwork
= container_of(work
, struct delayed_work
, work
);
2017 wlvif
= container_of(dwork
, struct wl12xx_vif
, connection_loss_work
);
2020 wl1271_info("Connection loss work (role_id: %d).", wlvif
->role_id
);
2022 mutex_lock(&wl
->mutex
);
2024 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2027 /* Call mac80211 connection loss */
2028 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2031 vif
= wl12xx_wlvif_to_vif(wlvif
);
2032 ieee80211_connection_loss(vif
);
2034 mutex_unlock(&wl
->mutex
);
2037 static void wlcore_pending_auth_complete_work(struct work_struct
*work
)
2039 struct delayed_work
*dwork
;
2041 struct wl12xx_vif
*wlvif
;
2042 unsigned long time_spare
;
2045 dwork
= container_of(work
, struct delayed_work
, work
);
2046 wlvif
= container_of(dwork
, struct wl12xx_vif
,
2047 pending_auth_complete_work
);
2050 mutex_lock(&wl
->mutex
);
2052 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2056 * Make sure a second really passed since the last auth reply. Maybe
2057 * a second auth reply arrived while we were stuck on the mutex.
2058 * Check for a little less than the timeout to protect from scheduler
2061 time_spare
= jiffies
+
2062 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT
- 50);
2063 if (!time_after(time_spare
, wlvif
->pending_auth_reply_time
))
2066 ret
= wl1271_ps_elp_wakeup(wl
);
2070 /* cancel the ROC if active */
2071 wlcore_update_inconn_sta(wl
, wlvif
, NULL
, false);
2073 wl1271_ps_elp_sleep(wl
);
2075 mutex_unlock(&wl
->mutex
);
2078 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
2080 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
2081 WL12XX_MAX_RATE_POLICIES
);
2082 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
2085 __set_bit(policy
, wl
->rate_policies_map
);
2090 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
2092 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
2095 __clear_bit(*idx
, wl
->rate_policies_map
);
2096 *idx
= WL12XX_MAX_RATE_POLICIES
;
2099 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
2101 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
2102 WLCORE_MAX_KLV_TEMPLATES
);
2103 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
2106 __set_bit(policy
, wl
->klv_templates_map
);
2111 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
2113 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
2116 __clear_bit(*idx
, wl
->klv_templates_map
);
2117 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
2120 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2122 switch (wlvif
->bss_type
) {
2123 case BSS_TYPE_AP_BSS
:
2125 return WL1271_ROLE_P2P_GO
;
2127 return WL1271_ROLE_AP
;
2129 case BSS_TYPE_STA_BSS
:
2131 return WL1271_ROLE_P2P_CL
;
2133 return WL1271_ROLE_STA
;
2136 return WL1271_ROLE_IBSS
;
2139 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
2141 return WL12XX_INVALID_ROLE_TYPE
;
2144 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
2146 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2149 /* clear everything but the persistent data */
2150 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
2152 switch (ieee80211_vif_type_p2p(vif
)) {
2153 case NL80211_IFTYPE_P2P_CLIENT
:
2156 case NL80211_IFTYPE_STATION
:
2157 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2159 case NL80211_IFTYPE_ADHOC
:
2160 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2162 case NL80211_IFTYPE_P2P_GO
:
2165 case NL80211_IFTYPE_AP
:
2166 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2169 wlvif
->bss_type
= MAX_BSS_TYPE
;
2173 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2174 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2175 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2177 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2178 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2179 /* init sta/ibss data */
2180 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2181 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2182 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2183 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2184 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2185 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2186 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2187 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2190 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2191 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2192 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2193 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2194 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2195 wl12xx_allocate_rate_policy(wl
,
2196 &wlvif
->ap
.ucast_rate_idx
[i
]);
2197 wlvif
->basic_rate_set
= CONF_TX_ENABLED_RATES
;
2199 * TODO: check if basic_rate shouldn't be
2200 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2201 * instead (the same thing for STA above).
2203 wlvif
->basic_rate
= CONF_TX_ENABLED_RATES
;
2204 /* TODO: this seems to be used only for STA, check it */
2205 wlvif
->rate_set
= CONF_TX_ENABLED_RATES
;
2208 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2209 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2210 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2213 * mac80211 configures some values globally, while we treat them
2214 * per-interface. thus, on init, we have to copy them from wl
2216 wlvif
->band
= wl
->band
;
2217 wlvif
->channel
= wl
->channel
;
2218 wlvif
->power_level
= wl
->power_level
;
2219 wlvif
->channel_type
= wl
->channel_type
;
2221 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2222 wl1271_rx_streaming_enable_work
);
2223 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2224 wl1271_rx_streaming_disable_work
);
2225 INIT_DELAYED_WORK(&wlvif
->channel_switch_work
,
2226 wlcore_channel_switch_work
);
2227 INIT_DELAYED_WORK(&wlvif
->connection_loss_work
,
2228 wlcore_connection_loss_work
);
2229 INIT_DELAYED_WORK(&wlvif
->pending_auth_complete_work
,
2230 wlcore_pending_auth_complete_work
);
2231 INIT_LIST_HEAD(&wlvif
->list
);
2233 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2234 (unsigned long) wlvif
);
2238 static int wl12xx_init_fw(struct wl1271
*wl
)
2240 int retries
= WL1271_BOOT_RETRIES
;
2241 bool booted
= false;
2242 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2247 ret
= wl12xx_chip_wakeup(wl
, false);
2251 ret
= wl
->ops
->boot(wl
);
2255 ret
= wl1271_hw_init(wl
);
2263 mutex_unlock(&wl
->mutex
);
2264 /* Unlocking the mutex in the middle of handling is
2265 inherently unsafe. In this case we deem it safe to do,
2266 because we need to let any possibly pending IRQ out of
2267 the system (and while we are WLCORE_STATE_OFF the IRQ
2268 work function will not do anything.) Also, any other
2269 possible concurrent operations will fail due to the
2270 current state, hence the wl1271 struct should be safe. */
2271 wlcore_disable_interrupts(wl
);
2272 wl1271_flush_deferred_work(wl
);
2273 cancel_work_sync(&wl
->netstack_work
);
2274 mutex_lock(&wl
->mutex
);
2276 wl1271_power_off(wl
);
2280 wl1271_error("firmware boot failed despite %d retries",
2281 WL1271_BOOT_RETRIES
);
2285 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2287 /* update hw/fw version info in wiphy struct */
2288 wiphy
->hw_version
= wl
->chip
.id
;
2289 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2290 sizeof(wiphy
->fw_version
));
2293 * Now we know if 11a is supported (info from the NVS), so disable
2294 * 11a channels if not supported
2296 if (!wl
->enable_11a
)
2297 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2299 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2300 wl
->enable_11a
? "" : "not ");
2302 wl
->state
= WLCORE_STATE_ON
;
2307 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2309 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2313 * Check whether a fw switch (i.e. moving from one loaded
2314 * fw to another) is needed. This function is also responsible
2315 * for updating wl->last_vif_count, so it must be called before
2316 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2319 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2320 struct vif_counter_data vif_counter_data
,
2323 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2324 u8 vif_count
= vif_counter_data
.counter
;
2326 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2329 /* increase the vif count if this is a new vif */
2330 if (add
&& !vif_counter_data
.cur_vif_running
)
2333 wl
->last_vif_count
= vif_count
;
2335 /* no need for fw change if the device is OFF */
2336 if (wl
->state
== WLCORE_STATE_OFF
)
2339 /* no need for fw change if a single fw is used */
2340 if (!wl
->mr_fw_name
)
2343 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2345 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2352 * Enter "forced psm". Make sure the sta is in psm against the ap,
2353 * to make the fw switch a bit more disconnection-persistent.
2355 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2357 struct wl12xx_vif
*wlvif
;
2359 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2360 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2364 struct wlcore_hw_queue_iter_data
{
2365 unsigned long hw_queue_map
[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES
)];
2367 struct ieee80211_vif
*vif
;
2368 /* is the current vif among those iterated */
2372 static void wlcore_hw_queue_iter(void *data
, u8
*mac
,
2373 struct ieee80211_vif
*vif
)
2375 struct wlcore_hw_queue_iter_data
*iter_data
= data
;
2377 if (WARN_ON_ONCE(vif
->hw_queue
[0] == IEEE80211_INVAL_HW_QUEUE
))
2380 if (iter_data
->cur_running
|| vif
== iter_data
->vif
) {
2381 iter_data
->cur_running
= true;
2385 __set_bit(vif
->hw_queue
[0] / NUM_TX_QUEUES
, iter_data
->hw_queue_map
);
2388 static int wlcore_allocate_hw_queue_base(struct wl1271
*wl
,
2389 struct wl12xx_vif
*wlvif
)
2391 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2392 struct wlcore_hw_queue_iter_data iter_data
= {};
2395 iter_data
.vif
= vif
;
2397 /* mark all bits taken by active interfaces */
2398 ieee80211_iterate_active_interfaces_atomic(wl
->hw
,
2399 IEEE80211_IFACE_ITER_RESUME_ALL
,
2400 wlcore_hw_queue_iter
, &iter_data
);
2402 /* the current vif is already running in mac80211 (resume/recovery) */
2403 if (iter_data
.cur_running
) {
2404 wlvif
->hw_queue_base
= vif
->hw_queue
[0];
2405 wl1271_debug(DEBUG_MAC80211
,
2406 "using pre-allocated hw queue base %d",
2407 wlvif
->hw_queue_base
);
2409 /* interface type might have changed type */
2410 goto adjust_cab_queue
;
2413 q_base
= find_first_zero_bit(iter_data
.hw_queue_map
,
2414 WLCORE_NUM_MAC_ADDRESSES
);
2415 if (q_base
>= WLCORE_NUM_MAC_ADDRESSES
)
2418 wlvif
->hw_queue_base
= q_base
* NUM_TX_QUEUES
;
2419 wl1271_debug(DEBUG_MAC80211
, "allocating hw queue base: %d",
2420 wlvif
->hw_queue_base
);
2422 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2423 wl
->queue_stop_reasons
[wlvif
->hw_queue_base
+ i
] = 0;
2424 /* register hw queues in mac80211 */
2425 vif
->hw_queue
[i
] = wlvif
->hw_queue_base
+ i
;
2429 /* the last places are reserved for cab queues per interface */
2430 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2431 vif
->cab_queue
= NUM_TX_QUEUES
* WLCORE_NUM_MAC_ADDRESSES
+
2432 wlvif
->hw_queue_base
/ NUM_TX_QUEUES
;
2434 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2439 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2440 struct ieee80211_vif
*vif
)
2442 struct wl1271
*wl
= hw
->priv
;
2443 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2444 struct vif_counter_data vif_count
;
2449 wl1271_error("Adding Interface not allowed while in PLT mode");
2453 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2454 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2456 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2457 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2459 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2461 mutex_lock(&wl
->mutex
);
2462 ret
= wl1271_ps_elp_wakeup(wl
);
2467 * in some very corner case HW recovery scenarios its possible to
2468 * get here before __wl1271_op_remove_interface is complete, so
2469 * opt out if that is the case.
2471 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2472 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2478 ret
= wl12xx_init_vif_data(wl
, vif
);
2483 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2484 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2489 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2493 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2494 wl12xx_force_active_psm(wl
);
2495 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2496 mutex_unlock(&wl
->mutex
);
2497 wl1271_recovery_work(&wl
->recovery_work
);
2502 * TODO: after the nvs issue will be solved, move this block
2503 * to start(), and make sure here the driver is ON.
2505 if (wl
->state
== WLCORE_STATE_OFF
) {
2507 * we still need this in order to configure the fw
2508 * while uploading the nvs
2510 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2512 ret
= wl12xx_init_fw(wl
);
2517 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2518 role_type
, &wlvif
->role_id
);
2522 ret
= wl1271_init_vif_specific(wl
, vif
);
2526 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2527 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2529 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2534 wl1271_ps_elp_sleep(wl
);
2536 mutex_unlock(&wl
->mutex
);
2541 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2542 struct ieee80211_vif
*vif
,
2543 bool reset_tx_queues
)
2545 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2547 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2549 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2551 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2554 /* because of hardware recovery, we may get here twice */
2555 if (wl
->state
== WLCORE_STATE_OFF
)
2558 wl1271_info("down");
2560 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2561 wl
->scan_wlvif
== wlvif
) {
2563 * Rearm the tx watchdog just before idling scan. This
2564 * prevents just-finished scans from triggering the watchdog
2566 wl12xx_rearm_tx_watchdog_locked(wl
);
2568 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2569 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2570 wl
->scan_wlvif
= NULL
;
2571 wl
->scan
.req
= NULL
;
2572 ieee80211_scan_completed(wl
->hw
, true);
2575 if (wl
->sched_vif
== wlvif
) {
2576 ieee80211_sched_scan_stopped(wl
->hw
);
2577 wl
->sched_vif
= NULL
;
2580 if (wl
->roc_vif
== vif
) {
2582 ieee80211_remain_on_channel_expired(wl
->hw
);
2585 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2586 /* disable active roles */
2587 ret
= wl1271_ps_elp_wakeup(wl
);
2591 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2592 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2593 if (wl12xx_dev_role_started(wlvif
))
2594 wl12xx_stop_dev(wl
, wlvif
);
2597 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2601 wl1271_ps_elp_sleep(wl
);
2604 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2606 /* clear all hlids (except system_hlid) */
2607 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2609 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2610 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2611 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2612 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2613 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2614 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2615 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2617 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2618 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2619 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2620 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2621 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2622 wl12xx_free_rate_policy(wl
,
2623 &wlvif
->ap
.ucast_rate_idx
[i
]);
2624 wl1271_free_ap_keys(wl
, wlvif
);
2627 dev_kfree_skb(wlvif
->probereq
);
2628 wlvif
->probereq
= NULL
;
2629 if (wl
->last_wlvif
== wlvif
)
2630 wl
->last_wlvif
= NULL
;
2631 list_del(&wlvif
->list
);
2632 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2633 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2634 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2642 * Last AP, have more stations. Configure sleep auth according to STA.
2643 * Don't do thin on unintended recovery.
2645 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2646 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2649 if (wl
->ap_count
== 0 && is_ap
) {
2650 /* mask ap events */
2651 wl
->event_mask
&= ~wl
->ap_event_mask
;
2652 wl1271_event_unmask(wl
);
2655 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2656 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2657 /* Configure for power according to debugfs */
2658 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2659 wl1271_acx_sleep_auth(wl
, sta_auth
);
2660 /* Configure for ELP power saving */
2662 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2666 mutex_unlock(&wl
->mutex
);
2668 del_timer_sync(&wlvif
->rx_streaming_timer
);
2669 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2670 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2671 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
2672 cancel_delayed_work_sync(&wlvif
->channel_switch_work
);
2673 cancel_delayed_work_sync(&wlvif
->pending_auth_complete_work
);
2675 mutex_lock(&wl
->mutex
);
2678 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2679 struct ieee80211_vif
*vif
)
2681 struct wl1271
*wl
= hw
->priv
;
2682 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2683 struct wl12xx_vif
*iter
;
2684 struct vif_counter_data vif_count
;
2686 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2687 mutex_lock(&wl
->mutex
);
2689 if (wl
->state
== WLCORE_STATE_OFF
||
2690 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2694 * wl->vif can be null here if someone shuts down the interface
2695 * just when hardware recovery has been started.
2697 wl12xx_for_each_wlvif(wl
, iter
) {
2701 __wl1271_op_remove_interface(wl
, vif
, true);
2704 WARN_ON(iter
!= wlvif
);
2705 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2706 wl12xx_force_active_psm(wl
);
2707 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2708 wl12xx_queue_recovery_work(wl
);
2711 mutex_unlock(&wl
->mutex
);
2714 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2715 struct ieee80211_vif
*vif
,
2716 enum nl80211_iftype new_type
, bool p2p
)
2718 struct wl1271
*wl
= hw
->priv
;
2721 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2722 wl1271_op_remove_interface(hw
, vif
);
2724 vif
->type
= new_type
;
2726 ret
= wl1271_op_add_interface(hw
, vif
);
2728 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2732 static int wlcore_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2735 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2738 * One of the side effects of the JOIN command is that is clears
2739 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2740 * to a WPA/WPA2 access point will therefore kill the data-path.
2741 * Currently the only valid scenario for JOIN during association
2742 * is on roaming, in which case we will also be given new keys.
2743 * Keep the below message for now, unless it starts bothering
2744 * users who really like to roam a lot :)
2746 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2747 wl1271_info("JOIN while associated.");
2749 /* clear encryption type */
2750 wlvif
->encryption_type
= KEY_NONE
;
2753 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2755 if (wl
->quirks
& WLCORE_QUIRK_START_STA_FAILS
) {
2757 * TODO: this is an ugly workaround for wl12xx fw
2758 * bug - we are not able to tx/rx after the first
2759 * start_sta, so make dummy start+stop calls,
2760 * and then call start_sta again.
2761 * this should be fixed in the fw.
2763 wl12xx_cmd_role_start_sta(wl
, wlvif
);
2764 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2767 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2773 static int wl1271_ssid_set(struct wl12xx_vif
*wlvif
, struct sk_buff
*skb
,
2777 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
2781 wl1271_error("No SSID in IEs!");
2786 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
2787 wl1271_error("SSID is too long!");
2791 wlvif
->ssid_len
= ssid_len
;
2792 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
2796 static int wlcore_set_ssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2798 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2799 struct sk_buff
*skb
;
2802 /* we currently only support setting the ssid from the ap probe req */
2803 if (wlvif
->bss_type
!= BSS_TYPE_STA_BSS
)
2806 skb
= ieee80211_ap_probereq_get(wl
->hw
, vif
);
2810 ieoffset
= offsetof(struct ieee80211_mgmt
,
2811 u
.probe_req
.variable
);
2812 wl1271_ssid_set(wlvif
, skb
, ieoffset
);
2818 static int wlcore_set_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2819 struct ieee80211_bss_conf
*bss_conf
,
2825 wlvif
->aid
= bss_conf
->aid
;
2826 wlvif
->channel_type
= cfg80211_get_chandef_type(&bss_conf
->chandef
);
2827 wlvif
->beacon_int
= bss_conf
->beacon_int
;
2828 wlvif
->wmm_enabled
= bss_conf
->qos
;
2830 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2833 * with wl1271, we don't need to update the
2834 * beacon_int and dtim_period, because the firmware
2835 * updates it by itself when the first beacon is
2836 * received after a join.
2838 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
2843 * Get a template for hardware connection maintenance
2845 dev_kfree_skb(wlvif
->probereq
);
2846 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
2849 ieoffset
= offsetof(struct ieee80211_mgmt
,
2850 u
.probe_req
.variable
);
2851 wl1271_ssid_set(wlvif
, wlvif
->probereq
, ieoffset
);
2853 /* enable the connection monitoring feature */
2854 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
2859 * The join command disable the keep-alive mode, shut down its process,
2860 * and also clear the template config, so we need to reset it all after
2861 * the join. The acx_aid starts the keep-alive process, and the order
2862 * of the commands below is relevant.
2864 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2868 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2872 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2876 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2877 wlvif
->sta
.klv_template_id
,
2878 ACX_KEEP_ALIVE_TPL_VALID
);
2883 * The default fw psm configuration is AUTO, while mac80211 default
2884 * setting is off (ACTIVE), so sync the fw with the correct value.
2886 ret
= wl1271_ps_set_mode(wl
, wlvif
, STATION_ACTIVE_MODE
);
2892 wl1271_tx_enabled_rates_get(wl
,
2895 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2903 static int wlcore_unset_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2906 bool sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
2908 /* make sure we are connected (sta) joined */
2910 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2913 /* make sure we are joined (ibss) */
2915 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
))
2919 /* use defaults when not associated */
2922 /* free probe-request template */
2923 dev_kfree_skb(wlvif
->probereq
);
2924 wlvif
->probereq
= NULL
;
2926 /* disable connection monitor features */
2927 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
2931 /* Disable the keep-alive feature */
2932 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
2937 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2938 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2940 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2941 ieee80211_chswitch_done(vif
, false);
2942 cancel_delayed_work(&wlvif
->channel_switch_work
);
2945 /* invalidate keep-alive template */
2946 wl1271_acx_keep_alive_config(wl
, wlvif
,
2947 wlvif
->sta
.klv_template_id
,
2948 ACX_KEEP_ALIVE_TPL_INVALID
);
2953 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2955 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2956 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2959 static void wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2962 bool cur_idle
= !test_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2964 if (idle
== cur_idle
)
2968 clear_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2970 /* The current firmware only supports sched_scan in idle */
2971 if (wl
->sched_vif
== wlvif
)
2972 wl
->ops
->sched_scan_stop(wl
, wlvif
);
2974 set_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2978 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2979 struct ieee80211_conf
*conf
, u32 changed
)
2983 if (conf
->power_level
!= wlvif
->power_level
) {
2984 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2988 wlvif
->power_level
= conf
->power_level
;
2994 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2996 struct wl1271
*wl
= hw
->priv
;
2997 struct wl12xx_vif
*wlvif
;
2998 struct ieee80211_conf
*conf
= &hw
->conf
;
3001 wl1271_debug(DEBUG_MAC80211
, "mac80211 config psm %s power %d %s"
3003 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
3005 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
3008 mutex_lock(&wl
->mutex
);
3010 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
3011 wl
->power_level
= conf
->power_level
;
3013 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3016 ret
= wl1271_ps_elp_wakeup(wl
);
3020 /* configure each interface */
3021 wl12xx_for_each_wlvif(wl
, wlvif
) {
3022 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
3028 wl1271_ps_elp_sleep(wl
);
3031 mutex_unlock(&wl
->mutex
);
3036 struct wl1271_filter_params
{
3039 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
3042 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
3043 struct netdev_hw_addr_list
*mc_list
)
3045 struct wl1271_filter_params
*fp
;
3046 struct netdev_hw_addr
*ha
;
3048 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
3050 wl1271_error("Out of memory setting filters.");
3054 /* update multicast filtering parameters */
3055 fp
->mc_list_length
= 0;
3056 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
3057 fp
->enabled
= false;
3060 netdev_hw_addr_list_for_each(ha
, mc_list
) {
3061 memcpy(fp
->mc_list
[fp
->mc_list_length
],
3062 ha
->addr
, ETH_ALEN
);
3063 fp
->mc_list_length
++;
3067 return (u64
)(unsigned long)fp
;
/*
 * Filter flags the driver claims to support.
 * NOTE(review): the middle terms were dropped by the extraction; only
 * FIF_PROMISC_IN_BSS and FIF_BCN_PRBRESP_PROMISC are visible - the rest
 * are restored from the usual wlcore definition, verify against upstream.
 */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3077 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
3078 unsigned int changed
,
3079 unsigned int *total
, u64 multicast
)
3081 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
3082 struct wl1271
*wl
= hw
->priv
;
3083 struct wl12xx_vif
*wlvif
;
3087 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
3088 " total %x", changed
, *total
);
3090 mutex_lock(&wl
->mutex
);
3092 *total
&= WL1271_SUPPORTED_FILTERS
;
3093 changed
&= WL1271_SUPPORTED_FILTERS
;
3095 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3098 ret
= wl1271_ps_elp_wakeup(wl
);
3102 wl12xx_for_each_wlvif(wl
, wlvif
) {
3103 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
3104 if (*total
& FIF_ALLMULTI
)
3105 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3109 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3112 fp
->mc_list_length
);
3119 * the fw doesn't provide an api to configure the filters. instead,
3120 * the filters configuration is based on the active roles / ROC
3125 wl1271_ps_elp_sleep(wl
);
3128 mutex_unlock(&wl
->mutex
);
3132 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3133 u8 id
, u8 key_type
, u8 key_size
,
3134 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
3137 struct wl1271_ap_key
*ap_key
;
3140 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
3142 if (key_size
> MAX_KEY_SIZE
)
3146 * Find next free entry in ap_keys. Also check we are not replacing
3149 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3150 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3153 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
3154 wl1271_warning("trying to record key replacement");
3159 if (i
== MAX_NUM_KEYS
)
3162 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
3167 ap_key
->key_type
= key_type
;
3168 ap_key
->key_size
= key_size
;
3169 memcpy(ap_key
->key
, key
, key_size
);
3170 ap_key
->hlid
= hlid
;
3171 ap_key
->tx_seq_32
= tx_seq_32
;
3172 ap_key
->tx_seq_16
= tx_seq_16
;
3174 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
3178 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3182 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3183 kfree(wlvif
->ap
.recorded_keys
[i
]);
3184 wlvif
->ap
.recorded_keys
[i
] = NULL
;
3188 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3191 struct wl1271_ap_key
*key
;
3192 bool wep_key_added
= false;
3194 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3196 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3199 key
= wlvif
->ap
.recorded_keys
[i
];
3201 if (hlid
== WL12XX_INVALID_LINK_ID
)
3202 hlid
= wlvif
->ap
.bcast_hlid
;
3204 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3205 key
->id
, key
->key_type
,
3206 key
->key_size
, key
->key
,
3207 hlid
, key
->tx_seq_32
,
3212 if (key
->key_type
== KEY_WEP
)
3213 wep_key_added
= true;
3216 if (wep_key_added
) {
3217 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
3218 wlvif
->ap
.bcast_hlid
);
3224 wl1271_free_ap_keys(wl
, wlvif
);
3228 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3229 u16 action
, u8 id
, u8 key_type
,
3230 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
3231 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
3234 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3237 struct wl1271_station
*wl_sta
;
3241 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
3242 hlid
= wl_sta
->hlid
;
3244 hlid
= wlvif
->ap
.bcast_hlid
;
3247 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3249 * We do not support removing keys after AP shutdown.
3250 * Pretend we do to make mac80211 happy.
3252 if (action
!= KEY_ADD_OR_REPLACE
)
3255 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3257 key
, hlid
, tx_seq_32
,
3260 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3261 id
, key_type
, key_size
,
3262 key
, hlid
, tx_seq_32
,
3270 static const u8 bcast_addr
[ETH_ALEN
] = {
3271 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3274 addr
= sta
? sta
->addr
: bcast_addr
;
3276 if (is_zero_ether_addr(addr
)) {
3277 /* We dont support TX only encryption */
3281 /* The wl1271 does not allow to remove unicast keys - they
3282 will be cleared automatically on next CMD_JOIN. Ignore the
3283 request silently, as we dont want the mac80211 to emit
3284 an error message. */
3285 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3288 /* don't remove key if hlid was already deleted */
3289 if (action
== KEY_REMOVE
&&
3290 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3293 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3294 id
, key_type
, key_size
,
3295 key
, addr
, tx_seq_32
,
3305 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3306 struct ieee80211_vif
*vif
,
3307 struct ieee80211_sta
*sta
,
3308 struct ieee80211_key_conf
*key_conf
)
3310 struct wl1271
*wl
= hw
->priv
;
3312 bool might_change_spare
=
3313 key_conf
->cipher
== WL1271_CIPHER_SUITE_GEM
||
3314 key_conf
->cipher
== WLAN_CIPHER_SUITE_TKIP
;
3316 if (might_change_spare
) {
3318 * stop the queues and flush to ensure the next packets are
3319 * in sync with FW spare block accounting
3321 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3322 wl1271_tx_flush(wl
);
3325 mutex_lock(&wl
->mutex
);
3327 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3329 goto out_wake_queues
;
3332 ret
= wl1271_ps_elp_wakeup(wl
);
3334 goto out_wake_queues
;
3336 ret
= wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3338 wl1271_ps_elp_sleep(wl
);
3341 if (might_change_spare
)
3342 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3344 mutex_unlock(&wl
->mutex
);
3349 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3350 struct ieee80211_vif
*vif
,
3351 struct ieee80211_sta
*sta
,
3352 struct ieee80211_key_conf
*key_conf
)
3354 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3361 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3363 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3364 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3365 key_conf
->cipher
, key_conf
->keyidx
,
3366 key_conf
->keylen
, key_conf
->flags
);
3367 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3369 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
3371 struct wl1271_station
*wl_sta
= (void *)sta
->drv_priv
;
3372 hlid
= wl_sta
->hlid
;
3374 hlid
= wlvif
->ap
.bcast_hlid
;
3377 hlid
= wlvif
->sta
.hlid
;
3379 if (hlid
!= WL12XX_INVALID_LINK_ID
) {
3380 u64 tx_seq
= wl
->links
[hlid
].total_freed_pkts
;
3381 tx_seq_32
= WL1271_TX_SECURITY_HI32(tx_seq
);
3382 tx_seq_16
= WL1271_TX_SECURITY_LO16(tx_seq
);
3385 switch (key_conf
->cipher
) {
3386 case WLAN_CIPHER_SUITE_WEP40
:
3387 case WLAN_CIPHER_SUITE_WEP104
:
3390 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3392 case WLAN_CIPHER_SUITE_TKIP
:
3393 key_type
= KEY_TKIP
;
3394 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3396 case WLAN_CIPHER_SUITE_CCMP
:
3398 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3400 case WL1271_CIPHER_SUITE_GEM
:
3404 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3411 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3412 key_conf
->keyidx
, key_type
,
3413 key_conf
->keylen
, key_conf
->key
,
3414 tx_seq_32
, tx_seq_16
, sta
);
3416 wl1271_error("Could not add or replace key");
3421 * reconfiguring arp response if the unicast (or common)
3422 * encryption key type was changed
3424 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3425 (sta
|| key_type
== KEY_WEP
) &&
3426 wlvif
->encryption_type
!= key_type
) {
3427 wlvif
->encryption_type
= key_type
;
3428 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3430 wl1271_warning("build arp rsp failed: %d", ret
);
3437 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3438 key_conf
->keyidx
, key_type
,
3439 key_conf
->keylen
, key_conf
->key
,
3442 wl1271_error("Could not remove key");
3448 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3454 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3456 static void wl1271_op_set_default_key_idx(struct ieee80211_hw
*hw
,
3457 struct ieee80211_vif
*vif
,
3460 struct wl1271
*wl
= hw
->priv
;
3461 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3464 wl1271_debug(DEBUG_MAC80211
, "mac80211 set default key idx %d",
3467 mutex_lock(&wl
->mutex
);
3469 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3474 ret
= wl1271_ps_elp_wakeup(wl
);
3478 wlvif
->default_key
= key_idx
;
3480 /* the default WEP key needs to be configured at least once */
3481 if (wlvif
->encryption_type
== KEY_WEP
) {
3482 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3490 wl1271_ps_elp_sleep(wl
);
3493 mutex_unlock(&wl
->mutex
);
3496 void wlcore_regdomain_config(struct wl1271
*wl
)
3500 if (!(wl
->quirks
& WLCORE_QUIRK_REGDOMAIN_CONF
))
3503 mutex_lock(&wl
->mutex
);
3505 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3508 ret
= wl1271_ps_elp_wakeup(wl
);
3512 ret
= wlcore_cmd_regdomain_config_locked(wl
);
3514 wl12xx_queue_recovery_work(wl
);
3518 wl1271_ps_elp_sleep(wl
);
3520 mutex_unlock(&wl
->mutex
);
3523 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3524 struct ieee80211_vif
*vif
,
3525 struct cfg80211_scan_request
*req
)
3527 struct wl1271
*wl
= hw
->priv
;
3532 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3535 ssid
= req
->ssids
[0].ssid
;
3536 len
= req
->ssids
[0].ssid_len
;
3539 mutex_lock(&wl
->mutex
);
3541 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3543 * We cannot return -EBUSY here because cfg80211 will expect
3544 * a call to ieee80211_scan_completed if we do - in this case
3545 * there won't be any call.
3551 ret
= wl1271_ps_elp_wakeup(wl
);
3555 /* fail if there is any role in ROC */
3556 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3557 /* don't allow scanning right now */
3562 ret
= wlcore_scan(hw
->priv
, vif
, ssid
, len
, req
);
3564 wl1271_ps_elp_sleep(wl
);
3566 mutex_unlock(&wl
->mutex
);
3571 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3572 struct ieee80211_vif
*vif
)
3574 struct wl1271
*wl
= hw
->priv
;
3575 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3578 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3580 mutex_lock(&wl
->mutex
);
3582 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3585 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3588 ret
= wl1271_ps_elp_wakeup(wl
);
3592 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3593 ret
= wl
->ops
->scan_stop(wl
, wlvif
);
3599 * Rearm the tx watchdog just before idling scan. This
3600 * prevents just-finished scans from triggering the watchdog
3602 wl12xx_rearm_tx_watchdog_locked(wl
);
3604 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3605 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3606 wl
->scan_wlvif
= NULL
;
3607 wl
->scan
.req
= NULL
;
3608 ieee80211_scan_completed(wl
->hw
, true);
3611 wl1271_ps_elp_sleep(wl
);
3613 mutex_unlock(&wl
->mutex
);
3615 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3618 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3619 struct ieee80211_vif
*vif
,
3620 struct cfg80211_sched_scan_request
*req
,
3621 struct ieee80211_sched_scan_ies
*ies
)
3623 struct wl1271
*wl
= hw
->priv
;
3624 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3627 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3629 mutex_lock(&wl
->mutex
);
3631 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3636 ret
= wl1271_ps_elp_wakeup(wl
);
3640 ret
= wl
->ops
->sched_scan_start(wl
, wlvif
, req
, ies
);
3644 wl
->sched_vif
= wlvif
;
3647 wl1271_ps_elp_sleep(wl
);
3649 mutex_unlock(&wl
->mutex
);
3653 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3654 struct ieee80211_vif
*vif
)
3656 struct wl1271
*wl
= hw
->priv
;
3657 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3660 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3662 mutex_lock(&wl
->mutex
);
3664 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3667 ret
= wl1271_ps_elp_wakeup(wl
);
3671 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3673 wl1271_ps_elp_sleep(wl
);
3675 mutex_unlock(&wl
->mutex
);
3678 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3680 struct wl1271
*wl
= hw
->priv
;
3683 mutex_lock(&wl
->mutex
);
3685 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3690 ret
= wl1271_ps_elp_wakeup(wl
);
3694 ret
= wl1271_acx_frag_threshold(wl
, value
);
3696 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3698 wl1271_ps_elp_sleep(wl
);
3701 mutex_unlock(&wl
->mutex
);
3706 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3708 struct wl1271
*wl
= hw
->priv
;
3709 struct wl12xx_vif
*wlvif
;
3712 mutex_lock(&wl
->mutex
);
3714 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3719 ret
= wl1271_ps_elp_wakeup(wl
);
3723 wl12xx_for_each_wlvif(wl
, wlvif
) {
3724 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3726 wl1271_warning("set rts threshold failed: %d", ret
);
3728 wl1271_ps_elp_sleep(wl
);
3731 mutex_unlock(&wl
->mutex
);
3736 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3739 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3740 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3741 skb
->len
- ieoffset
);
3746 memmove(ie
, next
, end
- next
);
3747 skb_trim(skb
, skb
->len
- len
);
3750 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3751 unsigned int oui
, u8 oui_type
,
3755 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3756 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3757 skb
->data
+ ieoffset
,
3758 skb
->len
- ieoffset
);
3763 memmove(ie
, next
, end
- next
);
3764 skb_trim(skb
, skb
->len
- len
);
3767 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3768 struct ieee80211_vif
*vif
)
3770 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3771 struct sk_buff
*skb
;
3774 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3778 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3779 CMD_TEMPL_AP_PROBE_RESPONSE
,
3788 wl1271_debug(DEBUG_AP
, "probe response updated");
3789 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3795 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3796 struct ieee80211_vif
*vif
,
3798 size_t probe_rsp_len
,
3801 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3802 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3803 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3804 int ssid_ie_offset
, ie_offset
, templ_len
;
3807 /* no need to change probe response if the SSID is set correctly */
3808 if (wlvif
->ssid_len
> 0)
3809 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3810 CMD_TEMPL_AP_PROBE_RESPONSE
,
3815 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3816 wl1271_error("probe_rsp template too big");
3820 /* start searching from IE offset */
3821 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3823 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3824 probe_rsp_len
- ie_offset
);
3826 wl1271_error("No SSID in beacon!");
3830 ssid_ie_offset
= ptr
- probe_rsp_data
;
3831 ptr
+= (ptr
[1] + 2);
3833 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3835 /* insert SSID from bss_conf */
3836 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3837 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3838 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3839 bss_conf
->ssid
, bss_conf
->ssid_len
);
3840 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3842 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3843 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3844 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3846 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3847 CMD_TEMPL_AP_PROBE_RESPONSE
,
3853 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3854 struct ieee80211_vif
*vif
,
3855 struct ieee80211_bss_conf
*bss_conf
,
3858 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3861 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3862 if (bss_conf
->use_short_slot
)
3863 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3865 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3867 wl1271_warning("Set slot time failed %d", ret
);
3872 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3873 if (bss_conf
->use_short_preamble
)
3874 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3876 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3879 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3880 if (bss_conf
->use_cts_prot
)
3881 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3884 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3885 CTSPROTECT_DISABLE
);
3887 wl1271_warning("Set ctsprotect failed %d", ret
);
3896 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3897 struct ieee80211_vif
*vif
,
3900 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3901 struct ieee80211_hdr
*hdr
;
3904 int ieoffset
= offsetof(struct ieee80211_mgmt
, u
.beacon
.variable
);
3905 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3913 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3915 ret
= wl1271_ssid_set(wlvif
, beacon
, ieoffset
);
3917 dev_kfree_skb(beacon
);
3920 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3921 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3923 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3928 dev_kfree_skb(beacon
);
3932 wlvif
->wmm_enabled
=
3933 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT
,
3934 WLAN_OUI_TYPE_MICROSOFT_WMM
,
3935 beacon
->data
+ ieoffset
,
3936 beacon
->len
- ieoffset
);
3939 * In case we already have a probe-resp beacon set explicitly
3940 * by usermode, don't use the beacon data.
3942 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3945 /* remove TIM ie from probe response */
3946 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3949 * remove p2p ie from probe response.
3950 * the fw reponds to probe requests that don't include
3951 * the p2p ie. probe requests with p2p ie will be passed,
3952 * and will be responded by the supplicant (the spec
3953 * forbids including the p2p ie when responding to probe
3954 * requests that didn't include it).
3956 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3957 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3959 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3960 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3961 IEEE80211_STYPE_PROBE_RESP
);
3963 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3968 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3969 CMD_TEMPL_PROBE_RESPONSE
,
3974 dev_kfree_skb(beacon
);
3982 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3983 struct ieee80211_vif
*vif
,
3984 struct ieee80211_bss_conf
*bss_conf
,
3987 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3988 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3991 if (changed
& BSS_CHANGED_BEACON_INT
) {
3992 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3993 bss_conf
->beacon_int
);
3995 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3998 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3999 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4001 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
4004 if (changed
& BSS_CHANGED_BEACON
) {
4005 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
4012 wl1271_error("beacon info change failed: %d", ret
);
4016 /* AP mode changes */
4017 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
4018 struct ieee80211_vif
*vif
,
4019 struct ieee80211_bss_conf
*bss_conf
,
4022 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4025 if (changed
& BSS_CHANGED_BASIC_RATES
) {
4026 u32 rates
= bss_conf
->basic_rates
;
4028 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
4030 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
4031 wlvif
->basic_rate_set
);
4033 ret
= wl1271_init_ap_rates(wl
, wlvif
);
4035 wl1271_error("AP rate policy change failed %d", ret
);
4039 ret
= wl1271_ap_init_templates(wl
, vif
);
4043 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
4047 ret
= wlcore_set_beacon_template(wl
, vif
, true);
4052 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
4056 if (changed
& BSS_CHANGED_BEACON_ENABLED
) {
4057 if (bss_conf
->enable_beacon
) {
4058 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4059 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
4063 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
4067 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4068 wl1271_debug(DEBUG_AP
, "started AP");
4071 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4073 * AP might be in ROC in case we have just
4074 * sent auth reply. handle it.
4076 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4077 wl12xx_croc(wl
, wlvif
->role_id
);
4079 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
4083 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4084 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
4086 wl1271_debug(DEBUG_AP
, "stopped AP");
4091 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4095 /* Handle HT information change */
4096 if ((changed
& BSS_CHANGED_HT
) &&
4097 (bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
)) {
4098 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4099 bss_conf
->ht_operation_mode
);
4101 wl1271_warning("Set ht information failed %d", ret
);
4110 static int wlcore_set_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4111 struct ieee80211_bss_conf
*bss_conf
,
4117 wl1271_debug(DEBUG_MAC80211
,
4118 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4119 bss_conf
->bssid
, bss_conf
->aid
,
4120 bss_conf
->beacon_int
,
4121 bss_conf
->basic_rates
, sta_rate_set
);
4123 wlvif
->beacon_int
= bss_conf
->beacon_int
;
4124 rates
= bss_conf
->basic_rates
;
4125 wlvif
->basic_rate_set
=
4126 wl1271_tx_enabled_rates_get(wl
, rates
,
4129 wl1271_tx_min_rate_get(wl
,
4130 wlvif
->basic_rate_set
);
4134 wl1271_tx_enabled_rates_get(wl
,
4138 /* we only support sched_scan while not connected */
4139 if (wl
->sched_vif
== wlvif
)
4140 wl
->ops
->sched_scan_stop(wl
, wlvif
);
4142 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4146 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
4150 ret
= wl1271_build_qos_null_data(wl
, wl12xx_wlvif_to_vif(wlvif
));
4154 wlcore_set_ssid(wl
, wlvif
);
4156 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4161 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4165 /* revert back to minimum rates for the current band */
4166 wl1271_set_band_rate(wl
, wlvif
);
4167 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4169 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4173 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4174 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4175 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4180 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4183 /* STA/IBSS mode changes */
4184 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4185 struct ieee80211_vif
*vif
,
4186 struct ieee80211_bss_conf
*bss_conf
,
4189 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4190 bool do_join
= false;
4191 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4192 bool ibss_joined
= false;
4193 u32 sta_rate_set
= 0;
4195 struct ieee80211_sta
*sta
;
4196 bool sta_exists
= false;
4197 struct ieee80211_sta_ht_cap sta_ht_cap
;
4200 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4206 if (changed
& BSS_CHANGED_IBSS
) {
4207 if (bss_conf
->ibss_joined
) {
4208 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4211 wlcore_unset_assoc(wl
, wlvif
);
4212 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4216 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4219 /* Need to update the SSID (for filtering etc) */
4220 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4223 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4224 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4225 bss_conf
->enable_beacon
? "enabled" : "disabled");
4230 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
)
4231 wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
4233 if (changed
& BSS_CHANGED_CQM
) {
4234 bool enable
= false;
4235 if (bss_conf
->cqm_rssi_thold
)
4237 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4238 bss_conf
->cqm_rssi_thold
,
4239 bss_conf
->cqm_rssi_hyst
);
4242 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4245 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4246 BSS_CHANGED_ASSOC
)) {
4248 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4250 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4252 /* save the supp_rates of the ap */
4253 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4254 if (sta
->ht_cap
.ht_supported
)
4256 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4257 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4258 sta_ht_cap
= sta
->ht_cap
;
4265 if (changed
& BSS_CHANGED_BSSID
) {
4266 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4267 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4272 /* Need to update the BSSID (for filtering etc) */
4275 ret
= wlcore_clear_bssid(wl
, wlvif
);
4281 if (changed
& BSS_CHANGED_IBSS
) {
4282 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4283 bss_conf
->ibss_joined
);
4285 if (bss_conf
->ibss_joined
) {
4286 u32 rates
= bss_conf
->basic_rates
;
4287 wlvif
->basic_rate_set
=
4288 wl1271_tx_enabled_rates_get(wl
, rates
,
4291 wl1271_tx_min_rate_get(wl
,
4292 wlvif
->basic_rate_set
);
4294 /* by default, use 11b + OFDM rates */
4295 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4296 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4302 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4307 ret
= wlcore_join(wl
, wlvif
);
4309 wl1271_warning("cmd join failed %d", ret
);
4314 if (changed
& BSS_CHANGED_ASSOC
) {
4315 if (bss_conf
->assoc
) {
4316 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4321 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4322 wl12xx_set_authorized(wl
, wlvif
);
4324 wlcore_unset_assoc(wl
, wlvif
);
4328 if (changed
& BSS_CHANGED_PS
) {
4329 if ((bss_conf
->ps
) &&
4330 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4331 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4335 if (wl
->conf
.conn
.forced_ps
) {
4336 ps_mode
= STATION_POWER_SAVE_MODE
;
4337 ps_mode_str
= "forced";
4339 ps_mode
= STATION_AUTO_PS_MODE
;
4340 ps_mode_str
= "auto";
4343 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4345 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4347 wl1271_warning("enter %s ps failed %d",
4349 } else if (!bss_conf
->ps
&&
4350 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4351 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4353 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4354 STATION_ACTIVE_MODE
);
4356 wl1271_warning("exit auto ps failed %d", ret
);
4360 /* Handle new association with HT. Do this after join. */
4363 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4365 ret
= wlcore_hw_set_peer_cap(wl
,
4371 wl1271_warning("Set ht cap failed %d", ret
);
4377 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4378 bss_conf
->ht_operation_mode
);
4380 wl1271_warning("Set ht information failed %d",
4387 /* Handle arp filtering. Done after join. */
4388 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4389 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4390 __be32 addr
= bss_conf
->arp_addr_list
[0];
4391 wlvif
->sta
.qos
= bss_conf
->qos
;
4392 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4394 if (bss_conf
->arp_addr_cnt
== 1 && bss_conf
->assoc
) {
4395 wlvif
->ip_addr
= addr
;
4397 * The template should have been configured only upon
4398 * association. however, it seems that the correct ip
4399 * isn't being set (when sending), so we have to
4400 * reconfigure the template upon every ip change.
4402 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4404 wl1271_warning("build arp rsp failed: %d", ret
);
4408 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4409 (ACX_ARP_FILTER_ARP_FILTERING
|
4410 ACX_ARP_FILTER_AUTO_ARP
),
4414 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4425 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4426 struct ieee80211_vif
*vif
,
4427 struct ieee80211_bss_conf
*bss_conf
,
4430 struct wl1271
*wl
= hw
->priv
;
4431 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4432 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4435 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4436 wlvif
->role_id
, (int)changed
);
4439 * make sure to cancel pending disconnections if our association
4442 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4443 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4445 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4446 !bss_conf
->enable_beacon
)
4447 wl1271_tx_flush(wl
);
4449 mutex_lock(&wl
->mutex
);
4451 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4454 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4457 ret
= wl1271_ps_elp_wakeup(wl
);
4462 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4464 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4466 wl1271_ps_elp_sleep(wl
);
4469 mutex_unlock(&wl
->mutex
);
4472 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4473 struct ieee80211_chanctx_conf
*ctx
)
4475 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4476 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4477 cfg80211_get_chandef_type(&ctx
->def
));
4481 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4482 struct ieee80211_chanctx_conf
*ctx
)
4484 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4485 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4486 cfg80211_get_chandef_type(&ctx
->def
));
4489 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4490 struct ieee80211_chanctx_conf
*ctx
,
4493 wl1271_debug(DEBUG_MAC80211
,
4494 "mac80211 change chanctx %d (type %d) changed 0x%x",
4495 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4496 cfg80211_get_chandef_type(&ctx
->def
), changed
);
4499 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4500 struct ieee80211_vif
*vif
,
4501 struct ieee80211_chanctx_conf
*ctx
)
4503 struct wl1271
*wl
= hw
->priv
;
4504 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4505 int channel
= ieee80211_frequency_to_channel(
4506 ctx
->def
.chan
->center_freq
);
4508 wl1271_debug(DEBUG_MAC80211
,
4509 "mac80211 assign chanctx (role %d) %d (type %d)",
4510 wlvif
->role_id
, channel
, cfg80211_get_chandef_type(&ctx
->def
));
4512 mutex_lock(&wl
->mutex
);
4514 wlvif
->band
= ctx
->def
.chan
->band
;
4515 wlvif
->channel
= channel
;
4516 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4518 /* update default rates according to the band */
4519 wl1271_set_band_rate(wl
, wlvif
);
4521 mutex_unlock(&wl
->mutex
);
4526 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4527 struct ieee80211_vif
*vif
,
4528 struct ieee80211_chanctx_conf
*ctx
)
4530 struct wl1271
*wl
= hw
->priv
;
4531 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4533 wl1271_debug(DEBUG_MAC80211
,
4534 "mac80211 unassign chanctx (role %d) %d (type %d)",
4536 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4537 cfg80211_get_chandef_type(&ctx
->def
));
4539 wl1271_tx_flush(wl
);
4542 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4543 struct ieee80211_vif
*vif
, u16 queue
,
4544 const struct ieee80211_tx_queue_params
*params
)
4546 struct wl1271
*wl
= hw
->priv
;
4547 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4551 mutex_lock(&wl
->mutex
);
4553 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4556 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4558 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4560 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4563 ret
= wl1271_ps_elp_wakeup(wl
);
4568 * the txop is confed in units of 32us by the mac80211,
4571 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4572 params
->cw_min
, params
->cw_max
,
4573 params
->aifs
, params
->txop
<< 5);
4577 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4578 CONF_CHANNEL_TYPE_EDCF
,
4579 wl1271_tx_get_queue(queue
),
4580 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4584 wl1271_ps_elp_sleep(wl
);
4587 mutex_unlock(&wl
->mutex
);
4592 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4593 struct ieee80211_vif
*vif
)
4596 struct wl1271
*wl
= hw
->priv
;
4597 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4598 u64 mactime
= ULLONG_MAX
;
4601 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4603 mutex_lock(&wl
->mutex
);
4605 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4608 ret
= wl1271_ps_elp_wakeup(wl
);
4612 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4617 wl1271_ps_elp_sleep(wl
);
4620 mutex_unlock(&wl
->mutex
);
4624 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4625 struct survey_info
*survey
)
4627 struct ieee80211_conf
*conf
= &hw
->conf
;
4632 survey
->channel
= conf
->chandef
.chan
;
4637 static int wl1271_allocate_sta(struct wl1271
*wl
,
4638 struct wl12xx_vif
*wlvif
,
4639 struct ieee80211_sta
*sta
)
4641 struct wl1271_station
*wl_sta
;
4645 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4646 wl1271_warning("could not allocate HLID - too much stations");
4650 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4651 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4653 wl1271_warning("could not allocate HLID - too many links");
4657 /* use the previous security seq, if this is a recovery/resume */
4658 wl
->links
[wl_sta
->hlid
].total_freed_pkts
= wl_sta
->total_freed_pkts
;
4660 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4661 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4662 wl
->active_sta_count
++;
4666 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4668 struct wl1271_station
*wl_sta
;
4669 struct ieee80211_sta
*sta
;
4670 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4672 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4675 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4676 __clear_bit(hlid
, &wl
->ap_ps_map
);
4677 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4680 * save the last used PN in the private part of iee80211_sta,
4681 * in case of recovery/suspend
4684 sta
= ieee80211_find_sta(vif
, wl
->links
[hlid
].addr
);
4686 wl_sta
= (void *)sta
->drv_priv
;
4687 wl_sta
->total_freed_pkts
= wl
->links
[hlid
].total_freed_pkts
;
4690 * increment the initial seq number on recovery to account for
4691 * transmitted packets that we haven't yet got in the FW status
4693 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
4694 wl_sta
->total_freed_pkts
+=
4695 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
4699 wl12xx_free_link(wl
, wlvif
, &hlid
);
4700 wl
->active_sta_count
--;
4703 * rearm the tx watchdog when the last STA is freed - give the FW a
4704 * chance to return STA-buffered packets before complaining.
4706 if (wl
->active_sta_count
== 0)
4707 wl12xx_rearm_tx_watchdog_locked(wl
);
4710 static int wl12xx_sta_add(struct wl1271
*wl
,
4711 struct wl12xx_vif
*wlvif
,
4712 struct ieee80211_sta
*sta
)
4714 struct wl1271_station
*wl_sta
;
4718 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4720 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4724 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4725 hlid
= wl_sta
->hlid
;
4727 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4729 wl1271_free_sta(wl
, wlvif
, hlid
);
4734 static int wl12xx_sta_remove(struct wl1271
*wl
,
4735 struct wl12xx_vif
*wlvif
,
4736 struct ieee80211_sta
*sta
)
4738 struct wl1271_station
*wl_sta
;
4741 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4743 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4745 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4748 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4752 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4756 static void wlcore_roc_if_possible(struct wl1271
*wl
,
4757 struct wl12xx_vif
*wlvif
)
4759 if (find_first_bit(wl
->roc_map
,
4760 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
4763 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
4766 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
4770 * when wl_sta is NULL, we treat this call as if coming from a
4771 * pending auth reply.
4772 * wl->mutex must be taken and the FW must be awake when the call
4775 void wlcore_update_inconn_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4776 struct wl1271_station
*wl_sta
, bool in_conn
)
4779 if (WARN_ON(wl_sta
&& wl_sta
->in_connection
))
4782 if (!wlvif
->ap_pending_auth_reply
&&
4783 !wlvif
->inconn_count
)
4784 wlcore_roc_if_possible(wl
, wlvif
);
4787 wl_sta
->in_connection
= true;
4788 wlvif
->inconn_count
++;
4790 wlvif
->ap_pending_auth_reply
= true;
4793 if (wl_sta
&& !wl_sta
->in_connection
)
4796 if (WARN_ON(!wl_sta
&& !wlvif
->ap_pending_auth_reply
))
4799 if (WARN_ON(wl_sta
&& !wlvif
->inconn_count
))
4803 wl_sta
->in_connection
= false;
4804 wlvif
->inconn_count
--;
4806 wlvif
->ap_pending_auth_reply
= false;
4809 if (!wlvif
->inconn_count
&& !wlvif
->ap_pending_auth_reply
&&
4810 test_bit(wlvif
->role_id
, wl
->roc_map
))
4811 wl12xx_croc(wl
, wlvif
->role_id
);
4815 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4816 struct wl12xx_vif
*wlvif
,
4817 struct ieee80211_sta
*sta
,
4818 enum ieee80211_sta_state old_state
,
4819 enum ieee80211_sta_state new_state
)
4821 struct wl1271_station
*wl_sta
;
4822 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4823 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4826 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4828 /* Add station (AP mode) */
4830 old_state
== IEEE80211_STA_NOTEXIST
&&
4831 new_state
== IEEE80211_STA_NONE
) {
4832 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
4836 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
4839 /* Remove station (AP mode) */
4841 old_state
== IEEE80211_STA_NONE
&&
4842 new_state
== IEEE80211_STA_NOTEXIST
) {
4844 wl12xx_sta_remove(wl
, wlvif
, sta
);
4846 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4849 /* Authorize station (AP mode) */
4851 new_state
== IEEE80211_STA_AUTHORIZED
) {
4852 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wl_sta
->hlid
);
4856 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4861 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4864 /* Authorize station */
4866 new_state
== IEEE80211_STA_AUTHORIZED
) {
4867 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4868 ret
= wl12xx_set_authorized(wl
, wlvif
);
4874 old_state
== IEEE80211_STA_AUTHORIZED
&&
4875 new_state
== IEEE80211_STA_ASSOC
) {
4876 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4877 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
4880 /* clear ROCs on failure or authorization */
4882 (new_state
== IEEE80211_STA_AUTHORIZED
||
4883 new_state
== IEEE80211_STA_NOTEXIST
)) {
4884 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4885 wl12xx_croc(wl
, wlvif
->role_id
);
4889 old_state
== IEEE80211_STA_NOTEXIST
&&
4890 new_state
== IEEE80211_STA_NONE
) {
4891 if (find_first_bit(wl
->roc_map
,
4892 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
4893 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
4894 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
4895 wlvif
->band
, wlvif
->channel
);
4901 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4902 struct ieee80211_vif
*vif
,
4903 struct ieee80211_sta
*sta
,
4904 enum ieee80211_sta_state old_state
,
4905 enum ieee80211_sta_state new_state
)
4907 struct wl1271
*wl
= hw
->priv
;
4908 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4911 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4912 sta
->aid
, old_state
, new_state
);
4914 mutex_lock(&wl
->mutex
);
4916 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4921 ret
= wl1271_ps_elp_wakeup(wl
);
4925 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4927 wl1271_ps_elp_sleep(wl
);
4929 mutex_unlock(&wl
->mutex
);
4930 if (new_state
< old_state
)
4935 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4936 struct ieee80211_vif
*vif
,
4937 enum ieee80211_ampdu_mlme_action action
,
4938 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4941 struct wl1271
*wl
= hw
->priv
;
4942 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4944 u8 hlid
, *ba_bitmap
;
4946 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4949 /* sanity check - the fields in FW are only 8bits wide */
4950 if (WARN_ON(tid
> 0xFF))
4953 mutex_lock(&wl
->mutex
);
4955 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4960 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4961 hlid
= wlvif
->sta
.hlid
;
4962 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4963 struct wl1271_station
*wl_sta
;
4965 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4966 hlid
= wl_sta
->hlid
;
4972 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4974 ret
= wl1271_ps_elp_wakeup(wl
);
4978 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4982 case IEEE80211_AMPDU_RX_START
:
4983 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4988 if (wl
->ba_rx_session_count
>= wl
->ba_rx_session_count_max
) {
4990 wl1271_error("exceeded max RX BA sessions");
4994 if (*ba_bitmap
& BIT(tid
)) {
4996 wl1271_error("cannot enable RX BA session on active "
5001 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
5004 *ba_bitmap
|= BIT(tid
);
5005 wl
->ba_rx_session_count
++;
5009 case IEEE80211_AMPDU_RX_STOP
:
5010 if (!(*ba_bitmap
& BIT(tid
))) {
5012 * this happens on reconfig - so only output a debug
5013 * message for now, and don't fail the function.
5015 wl1271_debug(DEBUG_MAC80211
,
5016 "no active RX BA session on tid: %d",
5022 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
5025 *ba_bitmap
&= ~BIT(tid
);
5026 wl
->ba_rx_session_count
--;
5031 * The BA initiator session management in FW independently.
5032 * Falling break here on purpose for all TX APDU commands.
5034 case IEEE80211_AMPDU_TX_START
:
5035 case IEEE80211_AMPDU_TX_STOP_CONT
:
5036 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
5037 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
5038 case IEEE80211_AMPDU_TX_OPERATIONAL
:
5043 wl1271_error("Incorrect ampdu action id=%x\n", action
);
5047 wl1271_ps_elp_sleep(wl
);
5050 mutex_unlock(&wl
->mutex
);
5055 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
5056 struct ieee80211_vif
*vif
,
5057 const struct cfg80211_bitrate_mask
*mask
)
5059 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5060 struct wl1271
*wl
= hw
->priv
;
5063 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
5064 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
5065 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
5067 mutex_lock(&wl
->mutex
);
5069 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
5070 wlvif
->bitrate_masks
[i
] =
5071 wl1271_tx_enabled_rates_get(wl
,
5072 mask
->control
[i
].legacy
,
5075 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5078 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
5079 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5081 ret
= wl1271_ps_elp_wakeup(wl
);
5085 wl1271_set_band_rate(wl
, wlvif
);
5087 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
5088 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
5090 wl1271_ps_elp_sleep(wl
);
5093 mutex_unlock(&wl
->mutex
);
5098 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
5099 struct ieee80211_channel_switch
*ch_switch
)
5101 struct wl1271
*wl
= hw
->priv
;
5102 struct wl12xx_vif
*wlvif
;
5105 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
5107 wl1271_tx_flush(wl
);
5109 mutex_lock(&wl
->mutex
);
5111 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
5112 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5113 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
5114 ieee80211_chswitch_done(vif
, false);
5117 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5121 ret
= wl1271_ps_elp_wakeup(wl
);
5125 /* TODO: change mac80211 to pass vif as param */
5126 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5127 unsigned long delay_usec
;
5129 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
5133 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5135 /* indicate failure 5 seconds after channel switch time */
5136 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
5138 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
5139 usecs_to_jiffies(delay_usec
) +
5140 msecs_to_jiffies(5000));
5144 wl1271_ps_elp_sleep(wl
);
5147 mutex_unlock(&wl
->mutex
);
5150 static void wlcore_op_flush(struct ieee80211_hw
*hw
, u32 queues
, bool drop
)
5152 struct wl1271
*wl
= hw
->priv
;
5154 wl1271_tx_flush(wl
);
5157 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
5158 struct ieee80211_vif
*vif
,
5159 struct ieee80211_channel
*chan
,
5161 enum ieee80211_roc_type type
)
5163 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5164 struct wl1271
*wl
= hw
->priv
;
5165 int channel
, ret
= 0;
5167 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
5169 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
5170 channel
, wlvif
->role_id
);
5172 mutex_lock(&wl
->mutex
);
5174 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5177 /* return EBUSY if we can't ROC right now */
5178 if (WARN_ON(wl
->roc_vif
||
5179 find_first_bit(wl
->roc_map
,
5180 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)) {
5185 ret
= wl1271_ps_elp_wakeup(wl
);
5189 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
5194 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
5195 msecs_to_jiffies(duration
));
5197 wl1271_ps_elp_sleep(wl
);
5199 mutex_unlock(&wl
->mutex
);
5203 static int __wlcore_roc_completed(struct wl1271
*wl
)
5205 struct wl12xx_vif
*wlvif
;
5208 /* already completed */
5209 if (unlikely(!wl
->roc_vif
))
5212 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5214 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5217 ret
= wl12xx_stop_dev(wl
, wlvif
);
5226 static int wlcore_roc_completed(struct wl1271
*wl
)
5230 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5232 mutex_lock(&wl
->mutex
);
5234 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5239 ret
= wl1271_ps_elp_wakeup(wl
);
5243 ret
= __wlcore_roc_completed(wl
);
5245 wl1271_ps_elp_sleep(wl
);
5247 mutex_unlock(&wl
->mutex
);
5252 static void wlcore_roc_complete_work(struct work_struct
*work
)
5254 struct delayed_work
*dwork
;
5258 dwork
= container_of(work
, struct delayed_work
, work
);
5259 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5261 ret
= wlcore_roc_completed(wl
);
5263 ieee80211_remain_on_channel_expired(wl
->hw
);
5266 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5268 struct wl1271
*wl
= hw
->priv
;
5270 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5273 wl1271_tx_flush(wl
);
5276 * we can't just flush_work here, because it might deadlock
5277 * (as we might get called from the same workqueue)
5279 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5280 wlcore_roc_completed(wl
);
5285 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5286 struct ieee80211_vif
*vif
,
5287 struct ieee80211_sta
*sta
,
5290 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5291 struct wl1271
*wl
= hw
->priv
;
5293 wlcore_hw_sta_rc_update(wl
, wlvif
, sta
, changed
);
5296 static int wlcore_op_get_rssi(struct ieee80211_hw
*hw
,
5297 struct ieee80211_vif
*vif
,
5298 struct ieee80211_sta
*sta
,
5301 struct wl1271
*wl
= hw
->priv
;
5302 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5305 wl1271_debug(DEBUG_MAC80211
, "mac80211 get_rssi");
5307 mutex_lock(&wl
->mutex
);
5309 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5312 ret
= wl1271_ps_elp_wakeup(wl
);
5316 ret
= wlcore_acx_average_rssi(wl
, wlvif
, rssi_dbm
);
5321 wl1271_ps_elp_sleep(wl
);
5324 mutex_unlock(&wl
->mutex
);
5329 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5331 struct wl1271
*wl
= hw
->priv
;
5334 mutex_lock(&wl
->mutex
);
5336 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5339 /* packets are considered pending if in the TX queue or the FW */
5340 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5342 mutex_unlock(&wl
->mutex
);
5347 /* can't be const, mac80211 writes to this */
5348 static struct ieee80211_rate wl1271_rates
[] = {
5350 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5351 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5353 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5354 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5355 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5357 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5358 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5359 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5361 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5362 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5363 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5365 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5366 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5368 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5369 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5371 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5372 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5374 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5375 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5377 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5378 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5380 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5381 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5383 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5384 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5386 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5387 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5390 /* can't be const, mac80211 writes to this */
5391 static struct ieee80211_channel wl1271_channels
[] = {
5392 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5393 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5394 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5395 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5396 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5397 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5398 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5399 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5400 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5401 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5402 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5403 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5404 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5405 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5408 /* can't be const, mac80211 writes to this */
5409 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5410 .channels
= wl1271_channels
,
5411 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5412 .bitrates
= wl1271_rates
,
5413 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5416 /* 5 GHz data rates for WL1273 */
5417 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5419 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5420 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5422 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5423 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5425 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5426 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5428 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5429 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5431 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5432 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5434 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5435 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5437 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5438 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5440 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5441 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5444 /* 5 GHz band channels for WL1273 */
5445 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
5446 { .hw_value
= 8, .center_freq
= 5040, .max_power
= WLCORE_MAX_TXPWR
},
5447 { .hw_value
= 12, .center_freq
= 5060, .max_power
= WLCORE_MAX_TXPWR
},
5448 { .hw_value
= 16, .center_freq
= 5080, .max_power
= WLCORE_MAX_TXPWR
},
5449 { .hw_value
= 34, .center_freq
= 5170, .max_power
= WLCORE_MAX_TXPWR
},
5450 { .hw_value
= 36, .center_freq
= 5180, .max_power
= WLCORE_MAX_TXPWR
},
5451 { .hw_value
= 38, .center_freq
= 5190, .max_power
= WLCORE_MAX_TXPWR
},
5452 { .hw_value
= 40, .center_freq
= 5200, .max_power
= WLCORE_MAX_TXPWR
},
5453 { .hw_value
= 42, .center_freq
= 5210, .max_power
= WLCORE_MAX_TXPWR
},
5454 { .hw_value
= 44, .center_freq
= 5220, .max_power
= WLCORE_MAX_TXPWR
},
5455 { .hw_value
= 46, .center_freq
= 5230, .max_power
= WLCORE_MAX_TXPWR
},
5456 { .hw_value
= 48, .center_freq
= 5240, .max_power
= WLCORE_MAX_TXPWR
},
5457 { .hw_value
= 52, .center_freq
= 5260, .max_power
= WLCORE_MAX_TXPWR
},
5458 { .hw_value
= 56, .center_freq
= 5280, .max_power
= WLCORE_MAX_TXPWR
},
5459 { .hw_value
= 60, .center_freq
= 5300, .max_power
= WLCORE_MAX_TXPWR
},
5460 { .hw_value
= 64, .center_freq
= 5320, .max_power
= WLCORE_MAX_TXPWR
},
5461 { .hw_value
= 100, .center_freq
= 5500, .max_power
= WLCORE_MAX_TXPWR
},
5462 { .hw_value
= 104, .center_freq
= 5520, .max_power
= WLCORE_MAX_TXPWR
},
5463 { .hw_value
= 108, .center_freq
= 5540, .max_power
= WLCORE_MAX_TXPWR
},
5464 { .hw_value
= 112, .center_freq
= 5560, .max_power
= WLCORE_MAX_TXPWR
},
5465 { .hw_value
= 116, .center_freq
= 5580, .max_power
= WLCORE_MAX_TXPWR
},
5466 { .hw_value
= 120, .center_freq
= 5600, .max_power
= WLCORE_MAX_TXPWR
},
5467 { .hw_value
= 124, .center_freq
= 5620, .max_power
= WLCORE_MAX_TXPWR
},
5468 { .hw_value
= 128, .center_freq
= 5640, .max_power
= WLCORE_MAX_TXPWR
},
5469 { .hw_value
= 132, .center_freq
= 5660, .max_power
= WLCORE_MAX_TXPWR
},
5470 { .hw_value
= 136, .center_freq
= 5680, .max_power
= WLCORE_MAX_TXPWR
},
5471 { .hw_value
= 140, .center_freq
= 5700, .max_power
= WLCORE_MAX_TXPWR
},
5472 { .hw_value
= 149, .center_freq
= 5745, .max_power
= WLCORE_MAX_TXPWR
},
5473 { .hw_value
= 153, .center_freq
= 5765, .max_power
= WLCORE_MAX_TXPWR
},
5474 { .hw_value
= 157, .center_freq
= 5785, .max_power
= WLCORE_MAX_TXPWR
},
5475 { .hw_value
= 161, .center_freq
= 5805, .max_power
= WLCORE_MAX_TXPWR
},
5476 { .hw_value
= 165, .center_freq
= 5825, .max_power
= WLCORE_MAX_TXPWR
},
5479 static struct ieee80211_supported_band wl1271_band_5ghz
= {
5480 .channels
= wl1271_channels_5ghz
,
5481 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
5482 .bitrates
= wl1271_rates_5ghz
,
5483 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
5486 static const struct ieee80211_ops wl1271_ops
= {
5487 .start
= wl1271_op_start
,
5488 .stop
= wlcore_op_stop
,
5489 .add_interface
= wl1271_op_add_interface
,
5490 .remove_interface
= wl1271_op_remove_interface
,
5491 .change_interface
= wl12xx_op_change_interface
,
5493 .suspend
= wl1271_op_suspend
,
5494 .resume
= wl1271_op_resume
,
5496 .config
= wl1271_op_config
,
5497 .prepare_multicast
= wl1271_op_prepare_multicast
,
5498 .configure_filter
= wl1271_op_configure_filter
,
5500 .set_key
= wlcore_op_set_key
,
5501 .hw_scan
= wl1271_op_hw_scan
,
5502 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
5503 .sched_scan_start
= wl1271_op_sched_scan_start
,
5504 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
5505 .bss_info_changed
= wl1271_op_bss_info_changed
,
5506 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
5507 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
5508 .conf_tx
= wl1271_op_conf_tx
,
5509 .get_tsf
= wl1271_op_get_tsf
,
5510 .get_survey
= wl1271_op_get_survey
,
5511 .sta_state
= wl12xx_op_sta_state
,
5512 .ampdu_action
= wl1271_op_ampdu_action
,
5513 .tx_frames_pending
= wl1271_tx_frames_pending
,
5514 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
5515 .set_default_unicast_key
= wl1271_op_set_default_key_idx
,
5516 .channel_switch
= wl12xx_op_channel_switch
,
5517 .flush
= wlcore_op_flush
,
5518 .remain_on_channel
= wlcore_op_remain_on_channel
,
5519 .cancel_remain_on_channel
= wlcore_op_cancel_remain_on_channel
,
5520 .add_chanctx
= wlcore_op_add_chanctx
,
5521 .remove_chanctx
= wlcore_op_remove_chanctx
,
5522 .change_chanctx
= wlcore_op_change_chanctx
,
5523 .assign_vif_chanctx
= wlcore_op_assign_vif_chanctx
,
5524 .unassign_vif_chanctx
= wlcore_op_unassign_vif_chanctx
,
5525 .sta_rc_update
= wlcore_op_sta_rc_update
,
5526 .get_rssi
= wlcore_op_get_rssi
,
5527 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
/*
 * wlcore_rate_to_idx() - map a raw HW RX rate value to the per-band
 * mac80211 rate index via wl->band_rate_to_idx.
 * NOTE(review): the extraction dropped interior lines of this function
 * (opening brace, the declaration of 'idx' and the return statements
 * are not visible here); the comments below describe only the visible
 * logic -- confirm against the original source.
 */
5531 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
/* reject rates beyond the HW rate table to avoid an out-of-bounds lookup */
5537 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5538 wl1271_error("Illegal RX rate from HW: %d", rate
);
/* translate the HW rate to the mac80211 index for this band */
5542 idx
= wl
->band_rate_to_idx
[band
][rate
];
/* the table marks rates this driver does not support with a sentinel */
5543 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5544 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5551 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5555 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5558 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5559 wl1271_warning("NIC part of the MAC address wraps around!");
5561 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5562 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5563 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5564 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5565 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5566 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5567 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5571 /* we may be one address short at the most */
5572 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5575 * turn on the LAA bit in the first address and use it as
5578 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5579 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5580 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5581 sizeof(wl
->addresses
[0]));
5583 wl
->addresses
[idx
].addr
[2] |= BIT(1);
5586 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5587 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
/*
 * wl12xx_get_hw_info() - power the chip on briefly to read identifying
 * information (chip ID, PG version, fused MAC address), then power off.
 * NOTE(review): interior lines (error checks/returns between the calls)
 * were dropped by the extraction; comments cover visible logic only.
 */
5590 static int wl12xx_get_hw_info(struct wl1271
*wl
)
/* power on so chip registers are accessible */
5594 ret
= wl12xx_set_power_on(wl
);
/* read the chip identification register */
5598 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
/* default the fuse-derived addresses; chip ops may overwrite below */
5602 wl
->fuse_oui_addr
= 0;
5603 wl
->fuse_nic_addr
= 0;
/* read the production-grade (PG) version via the chip-specific op */
5605 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
/* get_mac is optional -- only some chip families provide it */
5609 if (wl
->ops
->get_mac
)
5610 ret
= wl
->ops
->get_mac(wl
);
/* done probing; leave the chip powered off */
5613 wl1271_power_off(wl
);
5617 static int wl1271_register_hw(struct wl1271
*wl
)
5620 u32 oui_addr
= 0, nic_addr
= 0;
5622 if (wl
->mac80211_registered
)
5625 if (wl
->nvs_len
>= 12) {
5626 /* NOTE: The wl->nvs->nvs element must be first, in
5627 * order to simplify the casting, we assume it is at
5628 * the beginning of the wl->nvs structure.
5630 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5633 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5635 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5638 /* if the MAC address is zeroed in the NVS derive from fuse */
5639 if (oui_addr
== 0 && nic_addr
== 0) {
5640 oui_addr
= wl
->fuse_oui_addr
;
5641 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5642 nic_addr
= wl
->fuse_nic_addr
+ 1;
5645 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5647 ret
= ieee80211_register_hw(wl
->hw
);
5649 wl1271_error("unable to register mac80211 hw: %d", ret
);
5653 wl
->mac80211_registered
= true;
5655 wl1271_debugfs_init(wl
);
5657 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw() - tear down the mac80211 registration.
 * Stops PLT (production-line test) mode if active, unregisters the hw
 * and clears the registered flag so wl1271_register_hw() may run again.
 * NOTE(review): interior lines (e.g. the PLT-state check guarding
 * wl1271_plt_stop) were dropped by the extraction.
 */
5663 static void wl1271_unregister_hw(struct wl1271
*wl
)
5666 wl1271_plt_stop(wl
);
5668 ieee80211_unregister_hw(wl
->hw
);
5669 wl
->mac80211_registered
= false;
5673 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5676 .types
= BIT(NL80211_IFTYPE_STATION
),
5680 .types
= BIT(NL80211_IFTYPE_AP
) |
5681 BIT(NL80211_IFTYPE_P2P_GO
) |
5682 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5686 static struct ieee80211_iface_combination
5687 wlcore_iface_combinations
[] = {
5689 .max_interfaces
= 3,
5690 .limits
= wlcore_iface_limits
,
5691 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5695 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5698 static const u32 cipher_suites
[] = {
5699 WLAN_CIPHER_SUITE_WEP40
,
5700 WLAN_CIPHER_SUITE_WEP104
,
5701 WLAN_CIPHER_SUITE_TKIP
,
5702 WLAN_CIPHER_SUITE_CCMP
,
5703 WL1271_CIPHER_SUITE_GEM
,
5706 /* The tx descriptor buffer */
5707 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5709 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5710 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5713 /* FIXME: find a proper value */
5714 wl
->hw
->channel_change_time
= 10000;
5715 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5717 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5718 IEEE80211_HW_SUPPORTS_PS
|
5719 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5720 IEEE80211_HW_SUPPORTS_UAPSD
|
5721 IEEE80211_HW_HAS_RATE_CONTROL
|
5722 IEEE80211_HW_CONNECTION_MONITOR
|
5723 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5724 IEEE80211_HW_SPECTRUM_MGMT
|
5725 IEEE80211_HW_AP_LINK_PS
|
5726 IEEE80211_HW_AMPDU_AGGREGATION
|
5727 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5728 IEEE80211_HW_QUEUE_CONTROL
;
5730 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5731 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5733 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5734 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5735 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5736 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5737 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5738 wl
->hw
->wiphy
->max_match_sets
= 16;
5740 * Maximum length of elements in scanning probe request templates
5741 * should be the maximum length possible for a template, without
5742 * the IEEE80211 header of the template
5744 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5745 sizeof(struct ieee80211_header
);
5747 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5748 sizeof(struct ieee80211_header
);
5750 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 5000;
5752 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5753 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
|
5754 WIPHY_FLAG_SUPPORTS_SCHED_SCAN
;
5756 /* make sure all our channels fit in the scanned_ch bitmask */
5757 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5758 ARRAY_SIZE(wl1271_channels_5ghz
) >
5759 WL1271_MAX_CHANNELS
);
5761 * clear channel flags from the previous usage
5762 * and restore max_power & max_antenna_gain values.
5764 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
5765 wl1271_band_2ghz
.channels
[i
].flags
= 0;
5766 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5767 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
5770 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
5771 wl1271_band_5ghz
.channels
[i
].flags
= 0;
5772 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5773 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
5777 * We keep local copies of the band structs because we need to
5778 * modify them on a per-device basis.
5780 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5781 sizeof(wl1271_band_2ghz
));
5782 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5783 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5784 sizeof(*wl
->ht_cap
));
5785 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5786 sizeof(wl1271_band_5ghz
));
5787 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5788 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5789 sizeof(*wl
->ht_cap
));
5791 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5792 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5793 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5794 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5797 * allow 4 queues per mac address we support +
5798 * 1 cab queue per mac + one global offchannel Tx queue
5800 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
5802 /* the last queue is the offchannel queue */
5803 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
5804 wl
->hw
->max_rates
= 1;
5806 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5808 /* the FW answers probe-requests in AP-mode */
5809 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5810 wl
->hw
->wiphy
->probe_resp_offload
=
5811 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5812 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5813 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5815 /* allowed interface combinations */
5816 wlcore_iface_combinations
[0].num_different_channels
= wl
->num_channels
;
5817 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5818 wl
->hw
->wiphy
->n_iface_combinations
=
5819 ARRAY_SIZE(wlcore_iface_combinations
);
5821 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5823 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5824 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5826 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5831 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
5834 struct ieee80211_hw
*hw
;
5839 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5841 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5843 wl1271_error("could not alloc ieee80211_hw");
5849 memset(wl
, 0, sizeof(*wl
));
5851 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5853 wl1271_error("could not alloc wl priv");
5855 goto err_priv_alloc
;
5858 INIT_LIST_HEAD(&wl
->wlvif_list
);
5862 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5863 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5864 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5866 skb_queue_head_init(&wl
->deferred_rx_queue
);
5867 skb_queue_head_init(&wl
->deferred_tx_queue
);
5869 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5870 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5871 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5872 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5873 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5874 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
5875 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5877 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5878 if (!wl
->freezable_wq
) {
5885 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5886 wl
->band
= IEEE80211_BAND_2GHZ
;
5887 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5889 wl
->sg_enabled
= true;
5890 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5891 wl
->recovery_count
= 0;
5894 wl
->ap_fw_ps_map
= 0;
5896 wl
->platform_quirks
= 0;
5897 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5898 wl
->active_sta_count
= 0;
5899 wl
->active_link_count
= 0;
5901 init_waitqueue_head(&wl
->fwlog_waitq
);
5903 /* The system link is always allocated */
5904 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5906 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5907 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5908 wl
->tx_frames
[i
] = NULL
;
5910 spin_lock_init(&wl
->wl_lock
);
5912 wl
->state
= WLCORE_STATE_OFF
;
5913 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5914 mutex_init(&wl
->mutex
);
5915 mutex_init(&wl
->flush_mutex
);
5916 init_completion(&wl
->nvs_loading_complete
);
5918 order
= get_order(aggr_buf_size
);
5919 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5920 if (!wl
->aggr_buf
) {
5924 wl
->aggr_buf_size
= aggr_buf_size
;
5926 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5927 if (!wl
->dummy_packet
) {
5932 /* Allocate one page for the FW log */
5933 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5936 goto err_dummy_packet
;
5939 wl
->mbox_size
= mbox_size
;
5940 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
5946 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
5947 if (!wl
->buffer_32
) {
5958 free_page((unsigned long)wl
->fwlog
);
5961 dev_kfree_skb(wl
->dummy_packet
);
5964 free_pages((unsigned long)wl
->aggr_buf
, order
);
5967 destroy_workqueue(wl
->freezable_wq
);
5970 wl1271_debugfs_exit(wl
);
5974 ieee80211_free_hw(hw
);
5978 return ERR_PTR(ret
);
5980 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5982 int wlcore_free_hw(struct wl1271
*wl
)
5984 /* Unblock any fwlog readers */
5985 mutex_lock(&wl
->mutex
);
5986 wl
->fwlog_size
= -1;
5987 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5988 mutex_unlock(&wl
->mutex
);
5990 wlcore_sysfs_free(wl
);
5992 kfree(wl
->buffer_32
);
5994 free_page((unsigned long)wl
->fwlog
);
5995 dev_kfree_skb(wl
->dummy_packet
);
5996 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
5998 wl1271_debugfs_exit(wl
);
6002 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6006 kfree(wl
->fw_status_1
);
6007 kfree(wl
->tx_res_if
);
6008 destroy_workqueue(wl
->freezable_wq
);
6011 ieee80211_free_hw(wl
->hw
);
6015 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
6018 static const struct wiphy_wowlan_support wlcore_wowlan_support
= {
6019 .flags
= WIPHY_WOWLAN_ANY
,
6020 .n_patterns
= WL1271_MAX_RX_FILTERS
,
6021 .pattern_min_len
= 1,
6022 .pattern_max_len
= WL1271_RX_FILTER_MAX_PATTERN_SIZE
,
6026 static irqreturn_t
wlcore_hardirq(int irq
, void *cookie
)
6028 return IRQ_WAKE_THREAD
;
6031 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
6033 struct wl1271
*wl
= context
;
6034 struct platform_device
*pdev
= wl
->pdev
;
6035 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6036 struct wl12xx_platform_data
*pdata
= pdev_data
->pdata
;
6037 unsigned long irqflags
;
6039 irq_handler_t hardirq_fn
= NULL
;
6042 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
6044 wl1271_error("Could not allocate nvs data");
6047 wl
->nvs_len
= fw
->size
;
6049 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
6055 ret
= wl
->ops
->setup(wl
);
6059 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
6061 /* adjust some runtime configuration parameters */
6062 wlcore_adjust_conf(wl
);
6064 wl
->irq
= platform_get_irq(pdev
, 0);
6065 wl
->platform_quirks
= pdata
->platform_quirks
;
6066 wl
->if_ops
= pdev_data
->if_ops
;
6068 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
) {
6069 irqflags
= IRQF_TRIGGER_RISING
;
6070 hardirq_fn
= wlcore_hardirq
;
6072 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
6075 ret
= request_threaded_irq(wl
->irq
, hardirq_fn
, wlcore_irq
,
6076 irqflags
, pdev
->name
, wl
);
6078 wl1271_error("request_irq() failed: %d", ret
);
6083 ret
= enable_irq_wake(wl
->irq
);
6085 wl
->irq_wake_enabled
= true;
6086 device_init_wakeup(wl
->dev
, 1);
6087 if (pdata
->pwr_in_suspend
)
6088 wl
->hw
->wiphy
->wowlan
= &wlcore_wowlan_support
;
6091 disable_irq(wl
->irq
);
6093 ret
= wl12xx_get_hw_info(wl
);
6095 wl1271_error("couldn't get hw info");
6099 ret
= wl
->ops
->identify_chip(wl
);
6103 ret
= wl1271_init_ieee80211(wl
);
6107 ret
= wl1271_register_hw(wl
);
6111 ret
= wlcore_sysfs_init(wl
);
6115 wl
->initialized
= true;
6119 wl1271_unregister_hw(wl
);
6122 free_irq(wl
->irq
, wl
);
6128 release_firmware(fw
);
6129 complete_all(&wl
->nvs_loading_complete
);
/*
 * wlcore_probe() - platform-device probe entry point for chip drivers.
 * Validates that the chip driver filled in its ops and partition table,
 * then kicks off asynchronous NVS firmware loading; the real bring-up
 * continues in wlcore_nvs_cb() once the NVS file is available.
 * NOTE(review): interior lines (return statements, the callback
 * argument of request_firmware_nowait) were dropped by the extraction.
 */
6132 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
/* the chip-specific driver must have provided ops and a partition table */
6136 if (!wl
->ops
|| !wl
->ptable
)
6139 wl
->dev
= &pdev
->dev
;
6141 platform_set_drvdata(pdev
, wl
);
/* load the NVS calibration file asynchronously; continues in wlcore_nvs_cb */
6143 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6144 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
6147 wl1271_error("request_firmware_nowait failed: %d", ret
);
/* unblock wlcore_remove(), which waits on this completion */
6148 complete_all(&wl
->nvs_loading_complete
);
6153 EXPORT_SYMBOL_GPL(wlcore_probe
);
/*
 * wlcore_remove() - platform-device remove entry point.
 * Waits for the asynchronous NVS load (wlcore_nvs_cb) to finish before
 * tearing anything down, then disables wake IRQ, unregisters mac80211
 * and releases the interrupt line.
 * NOTE(review): interior lines (early return when !initialized, closing
 * braces, final return) were dropped by the extraction.
 */
6155 int wlcore_remove(struct platform_device
*pdev
)
6157 struct wl1271
*wl
= platform_get_drvdata(pdev
);
/* probe may still be running asynchronously -- serialize with it */
6159 wait_for_completion(&wl
->nvs_loading_complete
);
6160 if (!wl
->initialized
)
/* undo the wake-source setup done in wlcore_nvs_cb */
6163 if (wl
->irq_wake_enabled
) {
6164 device_init_wakeup(wl
->dev
, 0);
6165 disable_irq_wake(wl
->irq
);
6167 wl1271_unregister_hw(wl
);
6168 free_irq(wl
->irq
, wl
);
6173 EXPORT_SYMBOL_GPL(wlcore_remove
);
6175 u32 wl12xx_debug_level
= DEBUG_NONE
;
6176 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
6177 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
6178 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
6180 module_param_named(fwlog
, fwlog_param
, charp
, 0);
6181 MODULE_PARM_DESC(fwlog
,
6182 "FW logger options: continuous, ondemand, dbgpins or disable");
6184 module_param(fwlog_mem_blocks
, int, S_IRUSR
| S_IWUSR
);
6185 MODULE_PARM_DESC(fwlog_mem_blocks
, "fwlog mem_blocks");
6187 module_param(bug_on_recovery
, int, S_IRUSR
| S_IWUSR
);
6188 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
6190 module_param(no_recovery
, int, S_IRUSR
| S_IWUSR
);
6191 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
6193 MODULE_LICENSE("GPL");
6194 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6195 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6196 MODULE_FIRMWARE(WL12XX_NVS_NAME
);