3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
44 #define WL1271_BOOT_RETRIES 3
46 static char *fwlog_param
;
47 static int fwlog_mem_blocks
= -1;
48 static int bug_on_recovery
= -1;
49 static int no_recovery
= -1;
51 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
52 struct ieee80211_vif
*vif
,
53 bool reset_tx_queues
);
54 static void wlcore_op_stop_locked(struct wl1271
*wl
);
55 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
57 static int wl12xx_set_authorized(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
61 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
70 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
74 wl1271_info("Association completed.");
78 static void wl1271_reg_notify(struct wiphy
*wiphy
,
79 struct regulatory_request
*request
)
81 struct ieee80211_supported_band
*band
;
82 struct ieee80211_channel
*ch
;
84 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
85 struct wl1271
*wl
= hw
->priv
;
87 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
88 for (i
= 0; i
< band
->n_channels
; i
++) {
89 ch
= &band
->channels
[i
];
90 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
93 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
94 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
95 IEEE80211_CHAN_PASSIVE_SCAN
;
99 wlcore_regdomain_config(wl
);
102 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
107 /* we should hold wl->mutex */
108 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
113 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
115 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
121 * this function is being called when the rx_streaming interval
122 * has beed changed or rx_streaming should be disabled
124 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
127 int period
= wl
->conf
.rx_streaming
.interval
;
129 /* don't reconfigure if rx_streaming is disabled */
130 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
133 /* reconfigure/disable according to new streaming_period */
135 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
136 (wl
->conf
.rx_streaming
.always
||
137 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
138 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
140 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
141 /* don't cancel_work_sync since we might deadlock */
142 del_timer_sync(&wlvif
->rx_streaming_timer
);
148 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
151 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
152 rx_streaming_enable_work
);
153 struct wl1271
*wl
= wlvif
->wl
;
155 mutex_lock(&wl
->mutex
);
157 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
158 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
159 (!wl
->conf
.rx_streaming
.always
&&
160 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
163 if (!wl
->conf
.rx_streaming
.interval
)
166 ret
= wl1271_ps_elp_wakeup(wl
);
170 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
174 /* stop it after some time of inactivity */
175 mod_timer(&wlvif
->rx_streaming_timer
,
176 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
179 wl1271_ps_elp_sleep(wl
);
181 mutex_unlock(&wl
->mutex
);
184 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
187 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
188 rx_streaming_disable_work
);
189 struct wl1271
*wl
= wlvif
->wl
;
191 mutex_lock(&wl
->mutex
);
193 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
196 ret
= wl1271_ps_elp_wakeup(wl
);
200 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
205 wl1271_ps_elp_sleep(wl
);
207 mutex_unlock(&wl
->mutex
);
210 static void wl1271_rx_streaming_timer(unsigned long data
)
212 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
213 struct wl1271
*wl
= wlvif
->wl
;
214 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
220 /* if the watchdog is not armed, don't do anything */
221 if (wl
->tx_allocated_blocks
== 0)
224 cancel_delayed_work(&wl
->tx_watchdog_work
);
225 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
226 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
229 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
231 struct delayed_work
*dwork
;
234 dwork
= container_of(work
, struct delayed_work
, work
);
235 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
237 mutex_lock(&wl
->mutex
);
239 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
242 /* Tx went out in the meantime - everything is ok */
243 if (unlikely(wl
->tx_allocated_blocks
== 0))
247 * if a ROC is in progress, we might not have any Tx for a long
248 * time (e.g. pending Tx on the non-ROC channels)
250 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
251 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
252 wl
->conf
.tx
.tx_watchdog_timeout
);
253 wl12xx_rearm_tx_watchdog_locked(wl
);
258 * if a scan is in progress, we might not have any Tx for a long
261 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
262 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
263 wl
->conf
.tx
.tx_watchdog_timeout
);
264 wl12xx_rearm_tx_watchdog_locked(wl
);
269 * AP might cache a frame for a long time for a sleeping station,
270 * so rearm the timer if there's an AP interface with stations. If
271 * Tx is genuinely stuck we will most hopefully discover it when all
272 * stations are removed due to inactivity.
274 if (wl
->active_sta_count
) {
275 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
277 wl
->conf
.tx
.tx_watchdog_timeout
,
278 wl
->active_sta_count
);
279 wl12xx_rearm_tx_watchdog_locked(wl
);
283 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 wl
->conf
.tx
.tx_watchdog_timeout
);
285 wl12xx_queue_recovery_work(wl
);
288 mutex_unlock(&wl
->mutex
);
291 static void wlcore_adjust_conf(struct wl1271
*wl
)
293 /* Adjust settings according to optional module parameters */
295 /* Firmware Logger params */
296 if (fwlog_mem_blocks
!= -1) {
297 if (fwlog_mem_blocks
>= CONF_FWLOG_MIN_MEM_BLOCKS
&&
298 fwlog_mem_blocks
<= CONF_FWLOG_MAX_MEM_BLOCKS
) {
299 wl
->conf
.fwlog
.mem_blocks
= fwlog_mem_blocks
;
302 "Illegal fwlog_mem_blocks=%d using default %d",
303 fwlog_mem_blocks
, wl
->conf
.fwlog
.mem_blocks
);
308 if (!strcmp(fwlog_param
, "continuous")) {
309 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
310 } else if (!strcmp(fwlog_param
, "ondemand")) {
311 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
312 } else if (!strcmp(fwlog_param
, "dbgpins")) {
313 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
314 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
315 } else if (!strcmp(fwlog_param
, "disable")) {
316 wl
->conf
.fwlog
.mem_blocks
= 0;
317 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 if (bug_on_recovery
!= -1)
324 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
326 if (no_recovery
!= -1)
327 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
330 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
331 struct wl12xx_vif
*wlvif
,
336 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
342 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
343 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
354 else if (wl
->active_link_count
> 3 && fw_ps
&&
355 tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
356 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
359 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
360 struct wl12xx_vif
*wlvif
,
361 struct wl_fw_status_2
*status
)
366 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
367 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
368 wl1271_debug(DEBUG_PSM
,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
371 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
373 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
376 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
)
377 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
378 wl
->links
[hlid
].allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
392 struct wl1271_link
*lnk
;
394 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
395 sizeof(*status_2
) + wl
->fw_status_priv_len
;
397 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
402 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
403 "drv_rx_counter = %d, tx_results_counter = %d)",
405 status_1
->fw_rx_counter
,
406 status_1
->drv_rx_counter
,
407 status_1
->tx_results_counter
);
409 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
410 /* prevent wrap-around in freed-packets counter */
411 wl
->tx_allocated_pkts
[i
] -=
412 (status_2
->counters
.tx_released_pkts
[i
] -
413 wl
->tx_pkts_freed
[i
]) & 0xff;
415 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
419 for_each_set_bit(i
, wl
->links_map
, WL12XX_MAX_LINKS
) {
423 /* prevent wrap-around in freed-packets counter */
424 diff
= (status_2
->counters
.tx_lnk_free_pkts
[i
] -
425 lnk
->prev_freed_pkts
) & 0xff;
430 lnk
->allocated_pkts
-= diff
;
431 lnk
->prev_freed_pkts
= status_2
->counters
.tx_lnk_free_pkts
[i
];
433 /* accumulate the prev_freed_pkts counter */
434 lnk
->total_freed_pkts
+= diff
;
437 /* prevent wrap-around in total blocks counter */
438 if (likely(wl
->tx_blocks_freed
<=
439 le32_to_cpu(status_2
->total_released_blks
)))
440 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
443 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
444 le32_to_cpu(status_2
->total_released_blks
);
446 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
448 wl
->tx_allocated_blocks
-= freed_blocks
;
451 * If the FW freed some blocks:
452 * If we still have allocated blocks - re-arm the timer, Tx is
453 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 if (wl
->tx_allocated_blocks
)
457 wl12xx_rearm_tx_watchdog_locked(wl
);
459 cancel_delayed_work(&wl
->tx_watchdog_work
);
462 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
465 * The FW might change the total number of TX memblocks before
466 * we get a notification about blocks being released. Thus, the
467 * available blocks calculation might yield a temporary result
468 * which is lower than the actual available blocks. Keeping in
469 * mind that only blocks that were allocated can be moved from
470 * TX to RX, tx_blocks_available should never decrease here.
472 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
475 /* if more blocks are available now, tx work can be scheduled */
476 if (wl
->tx_blocks_available
> old_tx_blk_count
)
477 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
479 /* for AP update num of allocated TX blocks per link and ps status */
480 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
481 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
484 /* update the host-chipset time offset */
486 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
487 (s64
)le32_to_cpu(status_2
->fw_localtime
);
489 wl
->fw_fast_lnk_map
= le32_to_cpu(status_2
->link_fast_bitmap
);
494 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
498 /* Pass all received frames to the network stack */
499 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
500 ieee80211_rx_ni(wl
->hw
, skb
);
502 /* Return sent skbs to the network stack */
503 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
504 ieee80211_tx_status_ni(wl
->hw
, skb
);
507 static void wl1271_netstack_work(struct work_struct
*work
)
510 container_of(work
, struct wl1271
, netstack_work
);
513 wl1271_flush_deferred_work(wl
);
514 } while (skb_queue_len(&wl
->deferred_rx_queue
));
517 #define WL1271_IRQ_MAX_LOOPS 256
519 static int wlcore_irq_locked(struct wl1271
*wl
)
523 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
525 unsigned int defer_count
;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
535 wl1271_debug(DEBUG_IRQ
, "IRQ work");
537 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
540 ret
= wl1271_ps_elp_wakeup(wl
);
544 while (!done
&& loopcount
--) {
546 * In order to avoid a race with the hardirq, clear the flag
547 * before acknowledging the chip. Since the mutex is held,
548 * wl1271_ps_elp_wakeup cannot be called concurrently.
550 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
551 smp_mb__after_clear_bit();
553 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
557 wlcore_hw_tx_immediate_compl(wl
);
559 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
560 intr
&= WLCORE_ALL_INTR_MASK
;
566 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
567 wl1271_error("HW watchdog interrupt received! starting recovery.");
568 wl
->watchdog_recovery
= true;
571 /* restarting the chip. ignore any other interrupt. */
575 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
576 wl1271_error("SW watchdog interrupt received! "
577 "starting recovery.");
578 wl
->watchdog_recovery
= true;
581 /* restarting the chip. ignore any other interrupt. */
585 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
586 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
588 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
592 /* Check if any tx blocks were freed */
593 spin_lock_irqsave(&wl
->wl_lock
, flags
);
594 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
595 wl1271_tx_total_queue_count(wl
) > 0) {
596 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
598 * In order to avoid starvation of the TX path,
599 * call the work function directly.
601 ret
= wlcore_tx_work_locked(wl
);
605 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
608 /* check for tx results */
609 ret
= wlcore_hw_tx_delayed_compl(wl
);
613 /* Make sure the deferred queues don't get too long */
614 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
615 skb_queue_len(&wl
->deferred_rx_queue
);
616 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
617 wl1271_flush_deferred_work(wl
);
620 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
621 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
622 ret
= wl1271_event_handle(wl
, 0);
627 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
628 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
629 ret
= wl1271_event_handle(wl
, 1);
634 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
635 wl1271_debug(DEBUG_IRQ
,
636 "WL1271_ACX_INTR_INIT_COMPLETE");
638 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
639 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
642 wl1271_ps_elp_sleep(wl
);
648 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
652 struct wl1271
*wl
= cookie
;
654 /* complete the ELP completion */
655 spin_lock_irqsave(&wl
->wl_lock
, flags
);
656 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
658 complete(wl
->elp_compl
);
659 wl
->elp_compl
= NULL
;
662 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
663 /* don't enqueue a work right now. mark it as pending */
664 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
665 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
666 disable_irq_nosync(wl
->irq
);
667 pm_wakeup_event(wl
->dev
, 0);
668 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
671 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
673 /* TX might be handled here, avoid redundant work */
674 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
675 cancel_work_sync(&wl
->tx_work
);
677 mutex_lock(&wl
->mutex
);
679 ret
= wlcore_irq_locked(wl
);
681 wl12xx_queue_recovery_work(wl
);
683 spin_lock_irqsave(&wl
->wl_lock
, flags
);
684 /* In case TX was not handled here, queue TX work */
685 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
686 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
687 wl1271_tx_total_queue_count(wl
) > 0)
688 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
689 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
691 mutex_unlock(&wl
->mutex
);
696 struct vif_counter_data
{
699 struct ieee80211_vif
*cur_vif
;
700 bool cur_vif_running
;
703 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
704 struct ieee80211_vif
*vif
)
706 struct vif_counter_data
*counter
= data
;
709 if (counter
->cur_vif
== vif
)
710 counter
->cur_vif_running
= true;
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
715 struct ieee80211_vif
*cur_vif
,
716 struct vif_counter_data
*data
)
718 memset(data
, 0, sizeof(*data
));
719 data
->cur_vif
= cur_vif
;
721 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
722 wl12xx_vif_count_iter
, data
);
725 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
727 const struct firmware
*fw
;
729 enum wl12xx_fw_type fw_type
;
733 fw_type
= WL12XX_FW_TYPE_PLT
;
734 fw_name
= wl
->plt_fw_name
;
737 * we can't call wl12xx_get_vif_count() here because
738 * wl->mutex is taken, so use the cached last_vif_count value
740 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
741 fw_type
= WL12XX_FW_TYPE_MULTI
;
742 fw_name
= wl
->mr_fw_name
;
744 fw_type
= WL12XX_FW_TYPE_NORMAL
;
745 fw_name
= wl
->sr_fw_name
;
749 if (wl
->fw_type
== fw_type
)
752 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
754 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
757 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
762 wl1271_error("firmware size is not multiple of 32 bits: %zu",
769 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
770 wl
->fw_len
= fw
->size
;
771 wl
->fw
= vmalloc(wl
->fw_len
);
774 wl1271_error("could not allocate memory for the firmware");
779 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
781 wl
->fw_type
= fw_type
;
783 release_firmware(fw
);
788 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
792 /* Avoid a recursive recovery */
793 if (wl
->state
== WLCORE_STATE_ON
) {
794 wl
->state
= WLCORE_STATE_RESTARTING
;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
796 wl1271_ps_elp_wakeup(wl
);
797 wlcore_disable_interrupts_nosync(wl
);
798 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
802 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
806 /* Make sure we have enough room */
807 len
= min(maxlen
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
809 /* Fill the FW log file, consumed by the sysfs fwlog entry */
810 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
811 wl
->fwlog_size
+= len
;
816 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
818 struct wlcore_partition_set part
, old_part
;
825 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
826 (wl
->conf
.fwlog
.mem_blocks
== 0))
829 wl1271_info("Reading FW panic log");
831 block
= kmalloc(wl
->fw_mem_block_size
, GFP_KERNEL
);
836 * Make sure the chip is awake and the logger isn't active.
837 * Do not send a stop fwlog command if the fw is hanged or if
838 * dbgpins are used (due to some fw bug).
840 if (wl1271_ps_elp_wakeup(wl
))
842 if (!wl
->watchdog_recovery
&&
843 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
844 wl12xx_cmd_stop_fwlog(wl
);
846 /* Read the first memory block address */
847 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
851 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
855 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
856 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
857 end_of_log
= wl
->fwlog_end
;
859 offset
= sizeof(addr
);
863 old_part
= wl
->curr_part
;
864 memset(&part
, 0, sizeof(part
));
866 /* Traverse the memory blocks linked list */
868 part
.mem
.start
= wlcore_hw_convert_hwaddr(wl
, addr
);
869 part
.mem
.size
= PAGE_SIZE
;
871 ret
= wlcore_set_partition(wl
, &part
);
873 wl1271_error("%s: set_partition start=0x%X size=%d",
874 __func__
, part
.mem
.start
, part
.mem
.size
);
878 memset(block
, 0, wl
->fw_mem_block_size
);
879 ret
= wlcore_read_hwaddr(wl
, addr
, block
,
880 wl
->fw_mem_block_size
, false);
886 * Memory blocks are linked to one another. The first 4 bytes
887 * of each memory block hold the hardware address of the next
888 * one. The last memory block points to the first one in
889 * on demand mode and is equal to 0x2000000 in continuous mode.
891 addr
= le32_to_cpup((__le32
*)block
);
893 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
894 wl
->fw_mem_block_size
- offset
))
896 } while (addr
&& (addr
!= end_of_log
));
898 wake_up_interruptible(&wl
->fwlog_waitq
);
902 wlcore_set_partition(wl
, &old_part
);
905 static void wlcore_print_recovery(struct wl1271
*wl
)
911 wl1271_info("Hardware recovery in progress. FW ver: %s",
912 wl
->chip
.fw_ver_str
);
914 /* change partitions momentarily so we can read the FW pc */
915 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
919 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
923 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
927 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
928 pc
, hint_sts
, ++wl
->recovery_count
);
930 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
934 static void wl1271_recovery_work(struct work_struct
*work
)
937 container_of(work
, struct wl1271
, recovery_work
);
938 struct wl12xx_vif
*wlvif
;
939 struct ieee80211_vif
*vif
;
941 mutex_lock(&wl
->mutex
);
943 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
946 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
947 if (wl
->conf
.fwlog
.output
== WL12XX_FWLOG_OUTPUT_HOST
)
948 wl12xx_read_fwlog_panic(wl
);
949 wlcore_print_recovery(wl
);
952 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
953 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
955 if (wl
->conf
.recovery
.no_recovery
) {
956 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
960 /* Prevent spurious TX during FW restart */
961 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
963 /* reboot the chipset */
964 while (!list_empty(&wl
->wlvif_list
)) {
965 wlvif
= list_first_entry(&wl
->wlvif_list
,
966 struct wl12xx_vif
, list
);
967 vif
= wl12xx_wlvif_to_vif(wlvif
);
968 __wl1271_op_remove_interface(wl
, vif
, false);
971 wlcore_op_stop_locked(wl
);
973 ieee80211_restart_hw(wl
->hw
);
976 * Its safe to enable TX now - the queues are stopped after a request
979 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
982 wl
->watchdog_recovery
= false;
983 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
984 mutex_unlock(&wl
->mutex
);
987 static int wlcore_fw_wakeup(struct wl1271
*wl
)
989 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
992 static int wl1271_setup(struct wl1271
*wl
)
994 wl
->fw_status_1
= kzalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
995 sizeof(*wl
->fw_status_2
) +
996 wl
->fw_status_priv_len
, GFP_KERNEL
);
997 if (!wl
->fw_status_1
)
1000 wl
->fw_status_2
= (struct wl_fw_status_2
*)
1001 (((u8
*) wl
->fw_status_1
) +
1002 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
1004 wl
->tx_res_if
= kzalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
1005 if (!wl
->tx_res_if
) {
1006 kfree(wl
->fw_status_1
);
1013 static int wl12xx_set_power_on(struct wl1271
*wl
)
1017 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1018 ret
= wl1271_power_on(wl
);
1021 msleep(WL1271_POWER_ON_SLEEP
);
1022 wl1271_io_reset(wl
);
1025 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1029 /* ELP module wake up */
1030 ret
= wlcore_fw_wakeup(wl
);
1038 wl1271_power_off(wl
);
1042 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1046 ret
= wl12xx_set_power_on(wl
);
1051 * For wl127x based devices we could use the default block
1052 * size (512 bytes), but due to a bug in the sdio driver, we
1053 * need to set it explicitly after the chip is powered on. To
1054 * simplify the code and since the performance impact is
1055 * negligible, we use the same block size for all different
1058 * Check if the bus supports blocksize alignment and, if it
1059 * doesn't, make sure we don't have the quirk.
1061 if (!wl1271_set_block_size(wl
))
1062 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1064 /* TODO: make sure the lower driver has set things up correctly */
1066 ret
= wl1271_setup(wl
);
1070 ret
= wl12xx_fetch_firmware(wl
, plt
);
1078 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1080 int retries
= WL1271_BOOT_RETRIES
;
1081 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1083 static const char* const PLT_MODE
[] = {
1092 mutex_lock(&wl
->mutex
);
1094 wl1271_notice("power up");
1096 if (wl
->state
!= WLCORE_STATE_OFF
) {
1097 wl1271_error("cannot go into PLT state because not "
1098 "in off state: %d", wl
->state
);
1103 /* Indicate to lower levels that we are now in PLT mode */
1105 wl
->plt_mode
= plt_mode
;
1109 ret
= wl12xx_chip_wakeup(wl
, true);
1113 if (plt_mode
!= PLT_CHIP_AWAKE
) {
1114 ret
= wl
->ops
->plt_init(wl
);
1119 wl
->state
= WLCORE_STATE_ON
;
1120 wl1271_notice("firmware booted in PLT mode %s (%s)",
1122 wl
->chip
.fw_ver_str
);
1124 /* update hw/fw version info in wiphy struct */
1125 wiphy
->hw_version
= wl
->chip
.id
;
1126 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1127 sizeof(wiphy
->fw_version
));
1132 wl1271_power_off(wl
);
1136 wl
->plt_mode
= PLT_OFF
;
1138 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1139 WL1271_BOOT_RETRIES
);
1141 mutex_unlock(&wl
->mutex
);
1146 int wl1271_plt_stop(struct wl1271
*wl
)
1150 wl1271_notice("power down");
1153 * Interrupts must be disabled before setting the state to OFF.
1154 * Otherwise, the interrupt handler might be called and exit without
1155 * reading the interrupt status.
1157 wlcore_disable_interrupts(wl
);
1158 mutex_lock(&wl
->mutex
);
1160 mutex_unlock(&wl
->mutex
);
1163 * This will not necessarily enable interrupts as interrupts
1164 * may have been disabled when op_stop was called. It will,
1165 * however, balance the above call to disable_interrupts().
1167 wlcore_enable_interrupts(wl
);
1169 wl1271_error("cannot power down because not in PLT "
1170 "state: %d", wl
->state
);
1175 mutex_unlock(&wl
->mutex
);
1177 wl1271_flush_deferred_work(wl
);
1178 cancel_work_sync(&wl
->netstack_work
);
1179 cancel_work_sync(&wl
->recovery_work
);
1180 cancel_delayed_work_sync(&wl
->elp_work
);
1181 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1183 mutex_lock(&wl
->mutex
);
1184 wl1271_power_off(wl
);
1186 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1187 wl
->state
= WLCORE_STATE_OFF
;
1189 wl
->plt_mode
= PLT_OFF
;
1191 mutex_unlock(&wl
->mutex
);
1197 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1198 struct ieee80211_tx_control
*control
,
1199 struct sk_buff
*skb
)
1201 struct wl1271
*wl
= hw
->priv
;
1202 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1203 struct ieee80211_vif
*vif
= info
->control
.vif
;
1204 struct wl12xx_vif
*wlvif
= NULL
;
1205 unsigned long flags
;
1210 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1211 ieee80211_free_txskb(hw
, skb
);
1215 wlvif
= wl12xx_vif_to_data(vif
);
1216 mapping
= skb_get_queue_mapping(skb
);
1217 q
= wl1271_tx_get_queue(mapping
);
1219 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1221 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1224 * drop the packet if the link is invalid or the queue is stopped
1225 * for any reason but watermark. Watermark is a "soft"-stop so we
1226 * allow these packets through.
1228 if (hlid
== WL12XX_INVALID_LINK_ID
||
1229 (!test_bit(hlid
, wlvif
->links_map
)) ||
1230 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1231 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1232 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1233 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1234 ieee80211_free_txskb(hw
, skb
);
1238 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1240 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1242 wl
->tx_queue_count
[q
]++;
1243 wlvif
->tx_queue_count
[q
]++;
1246 * The workqueue is slow to process the tx_queue and we need stop
1247 * the queue here, otherwise the queue will get too long.
1249 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1250 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1251 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1252 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1253 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1254 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1258 * The chip specific setup must run before the first TX packet -
1259 * before that, the tx_work will not be initialized!
1262 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1263 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1264 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1267 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1270 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1272 unsigned long flags
;
1275 /* no need to queue a new dummy packet if one is already pending */
1276 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1279 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1281 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1282 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1283 wl
->tx_queue_count
[q
]++;
1284 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1286 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1287 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1288 return wlcore_tx_work_locked(wl
);
1291 * If the FW TX is busy, TX work will be scheduled by the threaded
1292 * interrupt handler function
1298 * The size of the dummy packet should be at least 1400 bytes. However, in
1299 * order to minimize the number of bus transactions, aligning it to 512 bytes
1300 * boundaries could be beneficial, performance wise
1302 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1304 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1306 struct sk_buff
*skb
;
1307 struct ieee80211_hdr_3addr
*hdr
;
1308 unsigned int dummy_packet_size
;
1310 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1311 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1313 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1315 wl1271_warning("Failed to allocate a dummy packet skb");
1319 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1321 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1322 memset(hdr
, 0, sizeof(*hdr
));
1323 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1324 IEEE80211_STYPE_NULLFUNC
|
1325 IEEE80211_FCTL_TODS
);
1327 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1329 /* Dummy packets require the TID to be management */
1330 skb
->priority
= WL1271_TID_MGMT
;
1332 /* Initialize all fields that might be used */
1333 skb_set_queue_mapping(skb
, 0);
1334 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1342 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern
*p
)
1344 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1345 int i
, pattern_len
= 0;
1348 wl1271_warning("No mask in WoWLAN pattern");
1353 * The pattern is broken up into segments of bytes at different offsets
1354 * that need to be checked by the FW filter. Each segment is called
1355 * a field in the FW API. We verify that the total number of fields
1356 * required for this pattern won't exceed FW limits (8)
1357 * as well as the total fields buffer won't exceed the FW limit.
1358 * Note that if there's a pattern which crosses Ethernet/IP header
1359 * boundary a new field is required.
1361 for (i
= 0; i
< p
->pattern_len
; i
++) {
1362 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1367 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1369 fields_size
+= pattern_len
+
1370 RX_FILTER_FIELD_OVERHEAD
;
1378 fields_size
+= pattern_len
+
1379 RX_FILTER_FIELD_OVERHEAD
;
1386 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1390 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1391 wl1271_warning("RX Filter too complex. Too many segments");
1395 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1396 wl1271_warning("RX filter pattern is too big");
1403 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1405 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1408 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1415 for (i
= 0; i
< filter
->num_fields
; i
++)
1416 kfree(filter
->fields
[i
].pattern
);
1421 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1422 u16 offset
, u8 flags
,
1423 u8
*pattern
, u8 len
)
1425 struct wl12xx_rx_filter_field
*field
;
1427 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1428 wl1271_warning("Max fields per RX filter. can't alloc another");
1432 field
= &filter
->fields
[filter
->num_fields
];
1434 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1435 if (!field
->pattern
) {
1436 wl1271_warning("Failed to allocate RX filter pattern");
1440 filter
->num_fields
++;
1442 field
->offset
= cpu_to_le16(offset
);
1443 field
->flags
= flags
;
1445 memcpy(field
->pattern
, pattern
, len
);
1450 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1452 int i
, fields_size
= 0;
1454 for (i
= 0; i
< filter
->num_fields
; i
++)
1455 fields_size
+= filter
->fields
[i
].len
+
1456 sizeof(struct wl12xx_rx_filter_field
) -
1462 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1466 struct wl12xx_rx_filter_field
*field
;
1468 for (i
= 0; i
< filter
->num_fields
; i
++) {
1469 field
= (struct wl12xx_rx_filter_field
*)buf
;
1471 field
->offset
= filter
->fields
[i
].offset
;
1472 field
->flags
= filter
->fields
[i
].flags
;
1473 field
->len
= filter
->fields
[i
].len
;
1475 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1476 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1477 sizeof(u8
*) + field
->len
;
1482 * Allocates an RX filter returned through f
1483 * which needs to be freed using rx_filter_free()
1486 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern
*p
,
1487 struct wl12xx_rx_filter
**f
)
1490 struct wl12xx_rx_filter
*filter
;
1494 filter
= wl1271_rx_filter_alloc();
1496 wl1271_warning("Failed to alloc rx filter");
1502 while (i
< p
->pattern_len
) {
1503 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1508 for (j
= i
; j
< p
->pattern_len
; j
++) {
1509 if (!test_bit(j
, (unsigned long *)p
->mask
))
1512 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1513 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1517 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1519 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1521 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1522 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1527 ret
= wl1271_rx_filter_alloc_field(filter
,
1530 &p
->pattern
[i
], len
);
1537 filter
->action
= FILTER_SIGNAL
;
1543 wl1271_rx_filter_free(filter
);
1549 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1550 struct cfg80211_wowlan
*wow
)
1554 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1555 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1560 ret
= wl1271_rx_filter_clear_all(wl
);
1567 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1570 /* Validate all incoming patterns before clearing current FW state */
1571 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1572 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1574 wl1271_warning("Bad wowlan pattern %d", i
);
1579 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1583 ret
= wl1271_rx_filter_clear_all(wl
);
1587 /* Translate WoWLAN patterns into filters */
1588 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1589 struct cfg80211_pkt_pattern
*p
;
1590 struct wl12xx_rx_filter
*filter
= NULL
;
1592 p
= &wow
->patterns
[i
];
1594 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1596 wl1271_warning("Failed to create an RX filter from "
1597 "wowlan pattern %d", i
);
1601 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1603 wl1271_rx_filter_free(filter
);
1608 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1614 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1615 struct wl12xx_vif
*wlvif
,
1616 struct cfg80211_wowlan
*wow
)
1620 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1623 ret
= wl1271_ps_elp_wakeup(wl
);
1627 ret
= wl1271_configure_wowlan(wl
, wow
);
1631 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1632 wl
->conf
.conn
.wake_up_event
) &&
1633 (wl
->conf
.conn
.suspend_listen_interval
==
1634 wl
->conf
.conn
.listen_interval
))
1637 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1638 wl
->conf
.conn
.suspend_wake_up_event
,
1639 wl
->conf
.conn
.suspend_listen_interval
);
1642 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1645 wl1271_ps_elp_sleep(wl
);
1651 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1652 struct wl12xx_vif
*wlvif
)
1656 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1659 ret
= wl1271_ps_elp_wakeup(wl
);
1663 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1665 wl1271_ps_elp_sleep(wl
);
1671 static int wl1271_configure_suspend(struct wl1271
*wl
,
1672 struct wl12xx_vif
*wlvif
,
1673 struct cfg80211_wowlan
*wow
)
1675 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1676 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1677 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1678 return wl1271_configure_suspend_ap(wl
, wlvif
);
1682 static void wl1271_configure_resume(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1685 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1686 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1688 if ((!is_ap
) && (!is_sta
))
1691 if (is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1694 ret
= wl1271_ps_elp_wakeup(wl
);
1699 wl1271_configure_wowlan(wl
, NULL
);
1701 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1702 wl
->conf
.conn
.wake_up_event
) &&
1703 (wl
->conf
.conn
.suspend_listen_interval
==
1704 wl
->conf
.conn
.listen_interval
))
1707 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1708 wl
->conf
.conn
.wake_up_event
,
1709 wl
->conf
.conn
.listen_interval
);
1712 wl1271_error("resume: wake up conditions failed: %d",
1716 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1720 wl1271_ps_elp_sleep(wl
);
1723 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1724 struct cfg80211_wowlan
*wow
)
1726 struct wl1271
*wl
= hw
->priv
;
1727 struct wl12xx_vif
*wlvif
;
1730 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1733 /* we want to perform the recovery before suspending */
1734 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1735 wl1271_warning("postponing suspend to perform recovery");
1739 wl1271_tx_flush(wl
);
1741 mutex_lock(&wl
->mutex
);
1742 wl
->wow_enabled
= true;
1743 wl12xx_for_each_wlvif(wl
, wlvif
) {
1744 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1746 mutex_unlock(&wl
->mutex
);
1747 wl1271_warning("couldn't prepare device to suspend");
1751 mutex_unlock(&wl
->mutex
);
1752 /* flush any remaining work */
1753 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1756 * disable and re-enable interrupts in order to flush
1759 wlcore_disable_interrupts(wl
);
1762 * set suspended flag to avoid triggering a new threaded_irq
1763 * work. no need for spinlock as interrupts are disabled.
1765 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1767 wlcore_enable_interrupts(wl
);
1768 flush_work(&wl
->tx_work
);
1769 flush_delayed_work(&wl
->elp_work
);
1774 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1776 struct wl1271
*wl
= hw
->priv
;
1777 struct wl12xx_vif
*wlvif
;
1778 unsigned long flags
;
1779 bool run_irq_work
= false, pending_recovery
;
1782 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1784 WARN_ON(!wl
->wow_enabled
);
1787 * re-enable irq_work enqueuing, and call irq_work directly if
1788 * there is a pending work.
1790 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1791 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1792 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1793 run_irq_work
= true;
1794 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1796 mutex_lock(&wl
->mutex
);
1798 /* test the recovery flag before calling any SDIO functions */
1799 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1803 wl1271_debug(DEBUG_MAC80211
,
1804 "run postponed irq_work directly");
1806 /* don't talk to the HW if recovery is pending */
1807 if (!pending_recovery
) {
1808 ret
= wlcore_irq_locked(wl
);
1810 wl12xx_queue_recovery_work(wl
);
1813 wlcore_enable_interrupts(wl
);
1816 if (pending_recovery
) {
1817 wl1271_warning("queuing forgotten recovery on resume");
1818 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1822 wl12xx_for_each_wlvif(wl
, wlvif
) {
1823 wl1271_configure_resume(wl
, wlvif
);
1827 wl
->wow_enabled
= false;
1828 mutex_unlock(&wl
->mutex
);
1834 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1836 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1839 * We have to delay the booting of the hardware because
1840 * we need to know the local MAC address before downloading and
1841 * initializing the firmware. The MAC address cannot be changed
1842 * after boot, and without the proper MAC address, the firmware
1843 * will not function properly.
1845 * The MAC address is first known when the corresponding interface
1846 * is added. That is where we will initialize the hardware.
1852 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1856 if (wl
->state
== WLCORE_STATE_OFF
) {
1857 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1859 wlcore_enable_interrupts(wl
);
1865 * this must be before the cancel_work calls below, so that the work
1866 * functions don't perform further work.
1868 wl
->state
= WLCORE_STATE_OFF
;
1871 * Use the nosync variant to disable interrupts, so the mutex could be
1872 * held while doing so without deadlocking.
1874 wlcore_disable_interrupts_nosync(wl
);
1876 mutex_unlock(&wl
->mutex
);
1878 wlcore_synchronize_interrupts(wl
);
1879 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1880 cancel_work_sync(&wl
->recovery_work
);
1881 wl1271_flush_deferred_work(wl
);
1882 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1883 cancel_work_sync(&wl
->netstack_work
);
1884 cancel_work_sync(&wl
->tx_work
);
1885 cancel_delayed_work_sync(&wl
->elp_work
);
1886 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1888 /* let's notify MAC80211 about the remaining pending TX frames */
1889 mutex_lock(&wl
->mutex
);
1890 wl12xx_tx_reset(wl
);
1892 wl1271_power_off(wl
);
1894 * In case a recovery was scheduled, interrupts were disabled to avoid
1895 * an interrupt storm. Now that the power is down, it is safe to
1896 * re-enable interrupts to balance the disable depth
1898 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1899 wlcore_enable_interrupts(wl
);
1901 wl
->band
= IEEE80211_BAND_2GHZ
;
1904 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1905 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1906 wl
->tx_blocks_available
= 0;
1907 wl
->tx_allocated_blocks
= 0;
1908 wl
->tx_results_count
= 0;
1909 wl
->tx_packets_count
= 0;
1910 wl
->time_offset
= 0;
1911 wl
->ap_fw_ps_map
= 0;
1913 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1914 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1915 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1916 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1917 memset(wl
->session_ids
, 0, sizeof(wl
->session_ids
));
1918 wl
->active_sta_count
= 0;
1919 wl
->active_link_count
= 0;
1921 /* The system link is always allocated */
1922 wl
->links
[WL12XX_SYSTEM_HLID
].allocated_pkts
= 0;
1923 wl
->links
[WL12XX_SYSTEM_HLID
].prev_freed_pkts
= 0;
1924 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1927 * this is performed after the cancel_work calls and the associated
1928 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1929 * get executed before all these vars have been reset.
1933 wl
->tx_blocks_freed
= 0;
1935 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1936 wl
->tx_pkts_freed
[i
] = 0;
1937 wl
->tx_allocated_pkts
[i
] = 0;
1940 wl1271_debugfs_reset(wl
);
1942 kfree(wl
->fw_status_1
);
1943 wl
->fw_status_1
= NULL
;
1944 wl
->fw_status_2
= NULL
;
1945 kfree(wl
->tx_res_if
);
1946 wl
->tx_res_if
= NULL
;
1947 kfree(wl
->target_mem_map
);
1948 wl
->target_mem_map
= NULL
;
1951 * FW channels must be re-calibrated after recovery,
1952 * clear the last Reg-Domain channel configuration.
1954 memset(wl
->reg_ch_conf_last
, 0, sizeof(wl
->reg_ch_conf_last
));
1957 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1959 struct wl1271
*wl
= hw
->priv
;
1961 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1963 mutex_lock(&wl
->mutex
);
1965 wlcore_op_stop_locked(wl
);
1967 mutex_unlock(&wl
->mutex
);
1970 static void wlcore_channel_switch_work(struct work_struct
*work
)
1972 struct delayed_work
*dwork
;
1974 struct ieee80211_vif
*vif
;
1975 struct wl12xx_vif
*wlvif
;
1978 dwork
= container_of(work
, struct delayed_work
, work
);
1979 wlvif
= container_of(dwork
, struct wl12xx_vif
, channel_switch_work
);
1982 wl1271_info("channel switch failed (role_id: %d).", wlvif
->role_id
);
1984 mutex_lock(&wl
->mutex
);
1986 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
1989 /* check the channel switch is still ongoing */
1990 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
))
1993 vif
= wl12xx_wlvif_to_vif(wlvif
);
1994 ieee80211_chswitch_done(vif
, false);
1996 ret
= wl1271_ps_elp_wakeup(wl
);
2000 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2002 wl1271_ps_elp_sleep(wl
);
2004 mutex_unlock(&wl
->mutex
);
2007 static void wlcore_connection_loss_work(struct work_struct
*work
)
2009 struct delayed_work
*dwork
;
2011 struct ieee80211_vif
*vif
;
2012 struct wl12xx_vif
*wlvif
;
2014 dwork
= container_of(work
, struct delayed_work
, work
);
2015 wlvif
= container_of(dwork
, struct wl12xx_vif
, connection_loss_work
);
2018 wl1271_info("Connection loss work (role_id: %d).", wlvif
->role_id
);
2020 mutex_lock(&wl
->mutex
);
2022 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2025 /* Call mac80211 connection loss */
2026 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2029 vif
= wl12xx_wlvif_to_vif(wlvif
);
2030 ieee80211_connection_loss(vif
);
2032 mutex_unlock(&wl
->mutex
);
2035 static void wlcore_pending_auth_complete_work(struct work_struct
*work
)
2037 struct delayed_work
*dwork
;
2039 struct wl12xx_vif
*wlvif
;
2040 unsigned long time_spare
;
2043 dwork
= container_of(work
, struct delayed_work
, work
);
2044 wlvif
= container_of(dwork
, struct wl12xx_vif
,
2045 pending_auth_complete_work
);
2048 mutex_lock(&wl
->mutex
);
2050 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2054 * Make sure a second really passed since the last auth reply. Maybe
2055 * a second auth reply arrived while we were stuck on the mutex.
2056 * Check for a little less than the timeout to protect from scheduler
2059 time_spare
= jiffies
+
2060 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT
- 50);
2061 if (!time_after(time_spare
, wlvif
->pending_auth_reply_time
))
2064 ret
= wl1271_ps_elp_wakeup(wl
);
2068 /* cancel the ROC if active */
2069 wlcore_update_inconn_sta(wl
, wlvif
, NULL
, false);
2071 wl1271_ps_elp_sleep(wl
);
2073 mutex_unlock(&wl
->mutex
);
2076 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
2078 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
2079 WL12XX_MAX_RATE_POLICIES
);
2080 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
2083 __set_bit(policy
, wl
->rate_policies_map
);
2088 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
2090 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
2093 __clear_bit(*idx
, wl
->rate_policies_map
);
2094 *idx
= WL12XX_MAX_RATE_POLICIES
;
2097 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
2099 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
2100 WLCORE_MAX_KLV_TEMPLATES
);
2101 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
2104 __set_bit(policy
, wl
->klv_templates_map
);
2109 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
2111 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
2114 __clear_bit(*idx
, wl
->klv_templates_map
);
2115 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
2118 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2120 switch (wlvif
->bss_type
) {
2121 case BSS_TYPE_AP_BSS
:
2123 return WL1271_ROLE_P2P_GO
;
2125 return WL1271_ROLE_AP
;
2127 case BSS_TYPE_STA_BSS
:
2129 return WL1271_ROLE_P2P_CL
;
2131 return WL1271_ROLE_STA
;
2134 return WL1271_ROLE_IBSS
;
2137 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
2139 return WL12XX_INVALID_ROLE_TYPE
;
2142 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
2144 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2147 /* clear everything but the persistent data */
2148 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
2150 switch (ieee80211_vif_type_p2p(vif
)) {
2151 case NL80211_IFTYPE_P2P_CLIENT
:
2154 case NL80211_IFTYPE_STATION
:
2155 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2157 case NL80211_IFTYPE_ADHOC
:
2158 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2160 case NL80211_IFTYPE_P2P_GO
:
2163 case NL80211_IFTYPE_AP
:
2164 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2167 wlvif
->bss_type
= MAX_BSS_TYPE
;
2171 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2172 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2173 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2175 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2176 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2177 /* init sta/ibss data */
2178 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2179 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2180 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2181 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2182 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2183 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2184 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2185 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2188 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2189 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2190 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2191 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2192 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2193 wl12xx_allocate_rate_policy(wl
,
2194 &wlvif
->ap
.ucast_rate_idx
[i
]);
2195 wlvif
->basic_rate_set
= CONF_TX_ENABLED_RATES
;
2197 * TODO: check if basic_rate shouldn't be
2198 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2199 * instead (the same thing for STA above).
2201 wlvif
->basic_rate
= CONF_TX_ENABLED_RATES
;
2202 /* TODO: this seems to be used only for STA, check it */
2203 wlvif
->rate_set
= CONF_TX_ENABLED_RATES
;
2206 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2207 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2208 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2211 * mac80211 configures some values globally, while we treat them
2212 * per-interface. thus, on init, we have to copy them from wl
2214 wlvif
->band
= wl
->band
;
2215 wlvif
->channel
= wl
->channel
;
2216 wlvif
->power_level
= wl
->power_level
;
2217 wlvif
->channel_type
= wl
->channel_type
;
2219 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2220 wl1271_rx_streaming_enable_work
);
2221 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2222 wl1271_rx_streaming_disable_work
);
2223 INIT_DELAYED_WORK(&wlvif
->channel_switch_work
,
2224 wlcore_channel_switch_work
);
2225 INIT_DELAYED_WORK(&wlvif
->connection_loss_work
,
2226 wlcore_connection_loss_work
);
2227 INIT_DELAYED_WORK(&wlvif
->pending_auth_complete_work
,
2228 wlcore_pending_auth_complete_work
);
2229 INIT_LIST_HEAD(&wlvif
->list
);
2231 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2232 (unsigned long) wlvif
);
2236 static int wl12xx_init_fw(struct wl1271
*wl
)
2238 int retries
= WL1271_BOOT_RETRIES
;
2239 bool booted
= false;
2240 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2245 ret
= wl12xx_chip_wakeup(wl
, false);
2249 ret
= wl
->ops
->boot(wl
);
2253 ret
= wl1271_hw_init(wl
);
2261 mutex_unlock(&wl
->mutex
);
2262 /* Unlocking the mutex in the middle of handling is
2263 inherently unsafe. In this case we deem it safe to do,
2264 because we need to let any possibly pending IRQ out of
2265 the system (and while we are WLCORE_STATE_OFF the IRQ
2266 work function will not do anything.) Also, any other
2267 possible concurrent operations will fail due to the
2268 current state, hence the wl1271 struct should be safe. */
2269 wlcore_disable_interrupts(wl
);
2270 wl1271_flush_deferred_work(wl
);
2271 cancel_work_sync(&wl
->netstack_work
);
2272 mutex_lock(&wl
->mutex
);
2274 wl1271_power_off(wl
);
2278 wl1271_error("firmware boot failed despite %d retries",
2279 WL1271_BOOT_RETRIES
);
2283 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2285 /* update hw/fw version info in wiphy struct */
2286 wiphy
->hw_version
= wl
->chip
.id
;
2287 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2288 sizeof(wiphy
->fw_version
));
2291 * Now we know if 11a is supported (info from the NVS), so disable
2292 * 11a channels if not supported
2294 if (!wl
->enable_11a
)
2295 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2297 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2298 wl
->enable_11a
? "" : "not ");
2300 wl
->state
= WLCORE_STATE_ON
;
2305 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2307 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2311 * Check whether a fw switch (i.e. moving from one loaded
2312 * fw to another) is needed. This function is also responsible
2313 * for updating wl->last_vif_count, so it must be called before
2314 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2317 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2318 struct vif_counter_data vif_counter_data
,
2321 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2322 u8 vif_count
= vif_counter_data
.counter
;
2324 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2327 /* increase the vif count if this is a new vif */
2328 if (add
&& !vif_counter_data
.cur_vif_running
)
2331 wl
->last_vif_count
= vif_count
;
2333 /* no need for fw change if the device is OFF */
2334 if (wl
->state
== WLCORE_STATE_OFF
)
2337 /* no need for fw change if a single fw is used */
2338 if (!wl
->mr_fw_name
)
2341 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2343 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2350 * Enter "forced psm". Make sure the sta is in psm against the ap,
2351 * to make the fw switch a bit more disconnection-persistent.
2353 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2355 struct wl12xx_vif
*wlvif
;
2357 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2358 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2362 struct wlcore_hw_queue_iter_data
{
2363 unsigned long hw_queue_map
[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES
)];
2365 struct ieee80211_vif
*vif
;
2366 /* is the current vif among those iterated */
2370 static void wlcore_hw_queue_iter(void *data
, u8
*mac
,
2371 struct ieee80211_vif
*vif
)
2373 struct wlcore_hw_queue_iter_data
*iter_data
= data
;
2375 if (WARN_ON_ONCE(vif
->hw_queue
[0] == IEEE80211_INVAL_HW_QUEUE
))
2378 if (iter_data
->cur_running
|| vif
== iter_data
->vif
) {
2379 iter_data
->cur_running
= true;
2383 __set_bit(vif
->hw_queue
[0] / NUM_TX_QUEUES
, iter_data
->hw_queue_map
);
2386 static int wlcore_allocate_hw_queue_base(struct wl1271
*wl
,
2387 struct wl12xx_vif
*wlvif
)
2389 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2390 struct wlcore_hw_queue_iter_data iter_data
= {};
2393 iter_data
.vif
= vif
;
2395 /* mark all bits taken by active interfaces */
2396 ieee80211_iterate_active_interfaces_atomic(wl
->hw
,
2397 IEEE80211_IFACE_ITER_RESUME_ALL
,
2398 wlcore_hw_queue_iter
, &iter_data
);
2400 /* the current vif is already running in mac80211 (resume/recovery) */
2401 if (iter_data
.cur_running
) {
2402 wlvif
->hw_queue_base
= vif
->hw_queue
[0];
2403 wl1271_debug(DEBUG_MAC80211
,
2404 "using pre-allocated hw queue base %d",
2405 wlvif
->hw_queue_base
);
2407 /* interface type might have changed type */
2408 goto adjust_cab_queue
;
2411 q_base
= find_first_zero_bit(iter_data
.hw_queue_map
,
2412 WLCORE_NUM_MAC_ADDRESSES
);
2413 if (q_base
>= WLCORE_NUM_MAC_ADDRESSES
)
2416 wlvif
->hw_queue_base
= q_base
* NUM_TX_QUEUES
;
2417 wl1271_debug(DEBUG_MAC80211
, "allocating hw queue base: %d",
2418 wlvif
->hw_queue_base
);
2420 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2421 wl
->queue_stop_reasons
[wlvif
->hw_queue_base
+ i
] = 0;
2422 /* register hw queues in mac80211 */
2423 vif
->hw_queue
[i
] = wlvif
->hw_queue_base
+ i
;
2427 /* the last places are reserved for cab queues per interface */
2428 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2429 vif
->cab_queue
= NUM_TX_QUEUES
* WLCORE_NUM_MAC_ADDRESSES
+
2430 wlvif
->hw_queue_base
/ NUM_TX_QUEUES
;
2432 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2437 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2438 struct ieee80211_vif
*vif
)
2440 struct wl1271
*wl
= hw
->priv
;
2441 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2442 struct vif_counter_data vif_count
;
2447 wl1271_error("Adding Interface not allowed while in PLT mode");
2451 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2452 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2454 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2455 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2457 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2459 mutex_lock(&wl
->mutex
);
2460 ret
= wl1271_ps_elp_wakeup(wl
);
2465 * in some very corner case HW recovery scenarios its possible to
2466 * get here before __wl1271_op_remove_interface is complete, so
2467 * opt out if that is the case.
2469 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2470 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2476 ret
= wl12xx_init_vif_data(wl
, vif
);
2481 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2482 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2487 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2491 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2492 wl12xx_force_active_psm(wl
);
2493 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2494 mutex_unlock(&wl
->mutex
);
2495 wl1271_recovery_work(&wl
->recovery_work
);
2500 * TODO: after the nvs issue will be solved, move this block
2501 * to start(), and make sure here the driver is ON.
2503 if (wl
->state
== WLCORE_STATE_OFF
) {
2505 * we still need this in order to configure the fw
2506 * while uploading the nvs
2508 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2510 ret
= wl12xx_init_fw(wl
);
2515 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2516 role_type
, &wlvif
->role_id
);
2520 ret
= wl1271_init_vif_specific(wl
, vif
);
2524 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2525 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2527 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2532 wl1271_ps_elp_sleep(wl
);
2534 mutex_unlock(&wl
->mutex
);
2539 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2540 struct ieee80211_vif
*vif
,
2541 bool reset_tx_queues
)
2543 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2545 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2547 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2549 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2552 /* because of hardware recovery, we may get here twice */
2553 if (wl
->state
== WLCORE_STATE_OFF
)
2556 wl1271_info("down");
2558 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2559 wl
->scan_wlvif
== wlvif
) {
2561 * Rearm the tx watchdog just before idling scan. This
2562 * prevents just-finished scans from triggering the watchdog
2564 wl12xx_rearm_tx_watchdog_locked(wl
);
2566 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2567 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2568 wl
->scan_wlvif
= NULL
;
2569 wl
->scan
.req
= NULL
;
2570 ieee80211_scan_completed(wl
->hw
, true);
2573 if (wl
->sched_vif
== wlvif
) {
2574 ieee80211_sched_scan_stopped(wl
->hw
);
2575 wl
->sched_vif
= NULL
;
2578 if (wl
->roc_vif
== vif
) {
2580 ieee80211_remain_on_channel_expired(wl
->hw
);
2583 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2584 /* disable active roles */
2585 ret
= wl1271_ps_elp_wakeup(wl
);
2589 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2590 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2591 if (wl12xx_dev_role_started(wlvif
))
2592 wl12xx_stop_dev(wl
, wlvif
);
2595 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2599 wl1271_ps_elp_sleep(wl
);
2602 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2604 /* clear all hlids (except system_hlid) */
2605 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2607 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2608 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2609 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2610 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2611 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2612 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2613 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2615 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2616 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2617 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2618 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2619 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2620 wl12xx_free_rate_policy(wl
,
2621 &wlvif
->ap
.ucast_rate_idx
[i
]);
2622 wl1271_free_ap_keys(wl
, wlvif
);
2625 dev_kfree_skb(wlvif
->probereq
);
2626 wlvif
->probereq
= NULL
;
2627 if (wl
->last_wlvif
== wlvif
)
2628 wl
->last_wlvif
= NULL
;
2629 list_del(&wlvif
->list
);
2630 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2631 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2632 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2640 * Last AP, have more stations. Configure sleep auth according to STA.
2641 * Don't do this on unintended recovery.
2643 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2644 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2647 if (wl
->ap_count
== 0 && is_ap
) {
2648 /* mask ap events */
2649 wl
->event_mask
&= ~wl
->ap_event_mask
;
2650 wl1271_event_unmask(wl
);
2653 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2654 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2655 /* Configure for power according to debugfs */
2656 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2657 wl1271_acx_sleep_auth(wl
, sta_auth
);
2658 /* Configure for ELP power saving */
2660 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2664 mutex_unlock(&wl
->mutex
);
2666 del_timer_sync(&wlvif
->rx_streaming_timer
);
2667 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2668 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2669 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
2670 cancel_delayed_work_sync(&wlvif
->channel_switch_work
);
2671 cancel_delayed_work_sync(&wlvif
->pending_auth_complete_work
);
2673 mutex_lock(&wl
->mutex
);
2676 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2677 struct ieee80211_vif
*vif
)
2679 struct wl1271
*wl
= hw
->priv
;
2680 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2681 struct wl12xx_vif
*iter
;
2682 struct vif_counter_data vif_count
;
2684 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2685 mutex_lock(&wl
->mutex
);
2687 if (wl
->state
== WLCORE_STATE_OFF
||
2688 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2692 * wl->vif can be null here if someone shuts down the interface
2693 * just when hardware recovery has been started.
2695 wl12xx_for_each_wlvif(wl
, iter
) {
2699 __wl1271_op_remove_interface(wl
, vif
, true);
2702 WARN_ON(iter
!= wlvif
);
2703 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2704 wl12xx_force_active_psm(wl
);
2705 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2706 wl12xx_queue_recovery_work(wl
);
2709 mutex_unlock(&wl
->mutex
);
2712 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2713 struct ieee80211_vif
*vif
,
2714 enum nl80211_iftype new_type
, bool p2p
)
2716 struct wl1271
*wl
= hw
->priv
;
2719 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2720 wl1271_op_remove_interface(hw
, vif
);
2722 vif
->type
= new_type
;
2724 ret
= wl1271_op_add_interface(hw
, vif
);
2726 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2730 static int wlcore_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2733 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2736 * One of the side effects of the JOIN command is that is clears
2737 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2738 * to a WPA/WPA2 access point will therefore kill the data-path.
2739 * Currently the only valid scenario for JOIN during association
2740 * is on roaming, in which case we will also be given new keys.
2741 * Keep the below message for now, unless it starts bothering
2742 * users who really like to roam a lot :)
2744 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2745 wl1271_info("JOIN while associated.");
2747 /* clear encryption type */
2748 wlvif
->encryption_type
= KEY_NONE
;
2751 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2753 if (wl
->quirks
& WLCORE_QUIRK_START_STA_FAILS
) {
2755 * TODO: this is an ugly workaround for wl12xx fw
2756 * bug - we are not able to tx/rx after the first
2757 * start_sta, so make dummy start+stop calls,
2758 * and then call start_sta again.
2759 * this should be fixed in the fw.
2761 wl12xx_cmd_role_start_sta(wl
, wlvif
);
2762 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2765 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2771 static int wl1271_ssid_set(struct wl12xx_vif
*wlvif
, struct sk_buff
*skb
,
2775 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
2779 wl1271_error("No SSID in IEs!");
2784 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
2785 wl1271_error("SSID is too long!");
2789 wlvif
->ssid_len
= ssid_len
;
2790 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
2794 static int wlcore_set_ssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2796 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2797 struct sk_buff
*skb
;
2800 /* we currently only support setting the ssid from the ap probe req */
2801 if (wlvif
->bss_type
!= BSS_TYPE_STA_BSS
)
2804 skb
= ieee80211_ap_probereq_get(wl
->hw
, vif
);
2808 ieoffset
= offsetof(struct ieee80211_mgmt
,
2809 u
.probe_req
.variable
);
2810 wl1271_ssid_set(wlvif
, skb
, ieoffset
);
2816 static int wlcore_set_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2817 struct ieee80211_bss_conf
*bss_conf
,
2823 wlvif
->aid
= bss_conf
->aid
;
2824 wlvif
->channel_type
= cfg80211_get_chandef_type(&bss_conf
->chandef
);
2825 wlvif
->beacon_int
= bss_conf
->beacon_int
;
2826 wlvif
->wmm_enabled
= bss_conf
->qos
;
2828 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2831 * with wl1271, we don't need to update the
2832 * beacon_int and dtim_period, because the firmware
2833 * updates it by itself when the first beacon is
2834 * received after a join.
2836 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
2841 * Get a template for hardware connection maintenance
2843 dev_kfree_skb(wlvif
->probereq
);
2844 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
2847 ieoffset
= offsetof(struct ieee80211_mgmt
,
2848 u
.probe_req
.variable
);
2849 wl1271_ssid_set(wlvif
, wlvif
->probereq
, ieoffset
);
2851 /* enable the connection monitoring feature */
2852 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
2857 * The join command disable the keep-alive mode, shut down its process,
2858 * and also clear the template config, so we need to reset it all after
2859 * the join. The acx_aid starts the keep-alive process, and the order
2860 * of the commands below is relevant.
2862 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2866 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2870 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2874 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2875 wlvif
->sta
.klv_template_id
,
2876 ACX_KEEP_ALIVE_TPL_VALID
);
2881 * The default fw psm configuration is AUTO, while mac80211 default
2882 * setting is off (ACTIVE), so sync the fw with the correct value.
2884 ret
= wl1271_ps_set_mode(wl
, wlvif
, STATION_ACTIVE_MODE
);
2890 wl1271_tx_enabled_rates_get(wl
,
2893 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2901 static int wlcore_unset_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2904 bool sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
2906 /* make sure we are connected (sta) joined */
2908 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2911 /* make sure we are joined (ibss) */
2913 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
))
2917 /* use defaults when not associated */
2920 /* free probe-request template */
2921 dev_kfree_skb(wlvif
->probereq
);
2922 wlvif
->probereq
= NULL
;
2924 /* disable connection monitor features */
2925 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
2929 /* Disable the keep-alive feature */
2930 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
2935 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2936 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2938 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2939 ieee80211_chswitch_done(vif
, false);
2940 cancel_delayed_work(&wlvif
->channel_switch_work
);
2943 /* invalidate keep-alive template */
2944 wl1271_acx_keep_alive_config(wl
, wlvif
,
2945 wlvif
->sta
.klv_template_id
,
2946 ACX_KEEP_ALIVE_TPL_INVALID
);
2951 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2953 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2954 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2957 static void wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2960 bool cur_idle
= !test_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2962 if (idle
== cur_idle
)
2966 clear_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2968 /* The current firmware only supports sched_scan in idle */
2969 if (wl
->sched_vif
== wlvif
)
2970 wl
->ops
->sched_scan_stop(wl
, wlvif
);
2972 set_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
2976 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2977 struct ieee80211_conf
*conf
, u32 changed
)
2981 if (conf
->power_level
!= wlvif
->power_level
) {
2982 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2986 wlvif
->power_level
= conf
->power_level
;
2992 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2994 struct wl1271
*wl
= hw
->priv
;
2995 struct wl12xx_vif
*wlvif
;
2996 struct ieee80211_conf
*conf
= &hw
->conf
;
2999 wl1271_debug(DEBUG_MAC80211
, "mac80211 config psm %s power %d %s"
3001 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
3003 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
3006 mutex_lock(&wl
->mutex
);
3008 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
3009 wl
->power_level
= conf
->power_level
;
3011 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3014 ret
= wl1271_ps_elp_wakeup(wl
);
3018 /* configure each interface */
3019 wl12xx_for_each_wlvif(wl
, wlvif
) {
3020 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
3026 wl1271_ps_elp_sleep(wl
);
3029 mutex_unlock(&wl
->mutex
);
3034 struct wl1271_filter_params
{
3037 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
3040 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
3041 struct netdev_hw_addr_list
*mc_list
)
3043 struct wl1271_filter_params
*fp
;
3044 struct netdev_hw_addr
*ha
;
3046 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
3048 wl1271_error("Out of memory setting filters.");
3052 /* update multicast filtering parameters */
3053 fp
->mc_list_length
= 0;
3054 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
3055 fp
->enabled
= false;
3058 netdev_hw_addr_list_for_each(ha
, mc_list
) {
3059 memcpy(fp
->mc_list
[fp
->mc_list_length
],
3060 ha
->addr
, ETH_ALEN
);
3061 fp
->mc_list_length
++;
3065 return (u64
)(unsigned long)fp
;
3068 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3071 FIF_BCN_PRBRESP_PROMISC | \
3075 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
3076 unsigned int changed
,
3077 unsigned int *total
, u64 multicast
)
3079 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
3080 struct wl1271
*wl
= hw
->priv
;
3081 struct wl12xx_vif
*wlvif
;
3085 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
3086 " total %x", changed
, *total
);
3088 mutex_lock(&wl
->mutex
);
3090 *total
&= WL1271_SUPPORTED_FILTERS
;
3091 changed
&= WL1271_SUPPORTED_FILTERS
;
3093 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3096 ret
= wl1271_ps_elp_wakeup(wl
);
3100 wl12xx_for_each_wlvif(wl
, wlvif
) {
3101 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
3102 if (*total
& FIF_ALLMULTI
)
3103 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3107 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3110 fp
->mc_list_length
);
3117 * the fw doesn't provide an api to configure the filters. instead,
3118 * the filters configuration is based on the active roles / ROC
3123 wl1271_ps_elp_sleep(wl
);
3126 mutex_unlock(&wl
->mutex
);
3130 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3131 u8 id
, u8 key_type
, u8 key_size
,
3132 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
3135 struct wl1271_ap_key
*ap_key
;
3138 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
3140 if (key_size
> MAX_KEY_SIZE
)
3144 * Find next free entry in ap_keys. Also check we are not replacing
3147 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3148 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3151 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
3152 wl1271_warning("trying to record key replacement");
3157 if (i
== MAX_NUM_KEYS
)
3160 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
3165 ap_key
->key_type
= key_type
;
3166 ap_key
->key_size
= key_size
;
3167 memcpy(ap_key
->key
, key
, key_size
);
3168 ap_key
->hlid
= hlid
;
3169 ap_key
->tx_seq_32
= tx_seq_32
;
3170 ap_key
->tx_seq_16
= tx_seq_16
;
3172 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
3176 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3180 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3181 kfree(wlvif
->ap
.recorded_keys
[i
]);
3182 wlvif
->ap
.recorded_keys
[i
] = NULL
;
3186 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3189 struct wl1271_ap_key
*key
;
3190 bool wep_key_added
= false;
3192 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3194 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3197 key
= wlvif
->ap
.recorded_keys
[i
];
3199 if (hlid
== WL12XX_INVALID_LINK_ID
)
3200 hlid
= wlvif
->ap
.bcast_hlid
;
3202 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3203 key
->id
, key
->key_type
,
3204 key
->key_size
, key
->key
,
3205 hlid
, key
->tx_seq_32
,
3210 if (key
->key_type
== KEY_WEP
)
3211 wep_key_added
= true;
3214 if (wep_key_added
) {
3215 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
3216 wlvif
->ap
.bcast_hlid
);
3222 wl1271_free_ap_keys(wl
, wlvif
);
3226 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3227 u16 action
, u8 id
, u8 key_type
,
3228 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
3229 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
3232 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3235 struct wl1271_station
*wl_sta
;
3239 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
3240 hlid
= wl_sta
->hlid
;
3242 hlid
= wlvif
->ap
.bcast_hlid
;
3245 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3247 * We do not support removing keys after AP shutdown.
3248 * Pretend we do to make mac80211 happy.
3250 if (action
!= KEY_ADD_OR_REPLACE
)
3253 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3255 key
, hlid
, tx_seq_32
,
3258 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3259 id
, key_type
, key_size
,
3260 key
, hlid
, tx_seq_32
,
3268 static const u8 bcast_addr
[ETH_ALEN
] = {
3269 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3272 addr
= sta
? sta
->addr
: bcast_addr
;
3274 if (is_zero_ether_addr(addr
)) {
3275 /* We don't support TX only encryption */
3279 /* The wl1271 does not allow to remove unicast keys - they
3280 will be cleared automatically on next CMD_JOIN. Ignore the
3281 request silently, as we dont want the mac80211 to emit
3282 an error message. */
3283 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3286 /* don't remove key if hlid was already deleted */
3287 if (action
== KEY_REMOVE
&&
3288 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3291 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3292 id
, key_type
, key_size
,
3293 key
, addr
, tx_seq_32
,
3303 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3304 struct ieee80211_vif
*vif
,
3305 struct ieee80211_sta
*sta
,
3306 struct ieee80211_key_conf
*key_conf
)
3308 struct wl1271
*wl
= hw
->priv
;
3310 bool might_change_spare
=
3311 key_conf
->cipher
== WL1271_CIPHER_SUITE_GEM
||
3312 key_conf
->cipher
== WLAN_CIPHER_SUITE_TKIP
;
3314 if (might_change_spare
) {
3316 * stop the queues and flush to ensure the next packets are
3317 * in sync with FW spare block accounting
3319 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3320 wl1271_tx_flush(wl
);
3323 mutex_lock(&wl
->mutex
);
3325 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3327 goto out_wake_queues
;
3330 ret
= wl1271_ps_elp_wakeup(wl
);
3332 goto out_wake_queues
;
3334 ret
= wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3336 wl1271_ps_elp_sleep(wl
);
3339 if (might_change_spare
)
3340 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3342 mutex_unlock(&wl
->mutex
);
3347 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3348 struct ieee80211_vif
*vif
,
3349 struct ieee80211_sta
*sta
,
3350 struct ieee80211_key_conf
*key_conf
)
3352 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3359 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3361 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3362 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3363 key_conf
->cipher
, key_conf
->keyidx
,
3364 key_conf
->keylen
, key_conf
->flags
);
3365 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3367 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
3369 struct wl1271_station
*wl_sta
= (void *)sta
->drv_priv
;
3370 hlid
= wl_sta
->hlid
;
3372 hlid
= wlvif
->ap
.bcast_hlid
;
3375 hlid
= wlvif
->sta
.hlid
;
3377 if (hlid
!= WL12XX_INVALID_LINK_ID
) {
3378 u64 tx_seq
= wl
->links
[hlid
].total_freed_pkts
;
3379 tx_seq_32
= WL1271_TX_SECURITY_HI32(tx_seq
);
3380 tx_seq_16
= WL1271_TX_SECURITY_LO16(tx_seq
);
3383 switch (key_conf
->cipher
) {
3384 case WLAN_CIPHER_SUITE_WEP40
:
3385 case WLAN_CIPHER_SUITE_WEP104
:
3388 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3390 case WLAN_CIPHER_SUITE_TKIP
:
3391 key_type
= KEY_TKIP
;
3392 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3394 case WLAN_CIPHER_SUITE_CCMP
:
3396 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3398 case WL1271_CIPHER_SUITE_GEM
:
3402 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3409 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3410 key_conf
->keyidx
, key_type
,
3411 key_conf
->keylen
, key_conf
->key
,
3412 tx_seq_32
, tx_seq_16
, sta
);
3414 wl1271_error("Could not add or replace key");
3419 * reconfiguring arp response if the unicast (or common)
3420 * encryption key type was changed
3422 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3423 (sta
|| key_type
== KEY_WEP
) &&
3424 wlvif
->encryption_type
!= key_type
) {
3425 wlvif
->encryption_type
= key_type
;
3426 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3428 wl1271_warning("build arp rsp failed: %d", ret
);
3435 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3436 key_conf
->keyidx
, key_type
,
3437 key_conf
->keylen
, key_conf
->key
,
3440 wl1271_error("Could not remove key");
3446 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3452 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3454 static void wl1271_op_set_default_key_idx(struct ieee80211_hw
*hw
,
3455 struct ieee80211_vif
*vif
,
3458 struct wl1271
*wl
= hw
->priv
;
3459 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3462 wl1271_debug(DEBUG_MAC80211
, "mac80211 set default key idx %d",
3465 mutex_lock(&wl
->mutex
);
3467 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3472 ret
= wl1271_ps_elp_wakeup(wl
);
3476 wlvif
->default_key
= key_idx
;
3478 /* the default WEP key needs to be configured at least once */
3479 if (wlvif
->encryption_type
== KEY_WEP
) {
3480 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3488 wl1271_ps_elp_sleep(wl
);
3491 mutex_unlock(&wl
->mutex
);
3494 void wlcore_regdomain_config(struct wl1271
*wl
)
3498 if (!(wl
->quirks
& WLCORE_QUIRK_REGDOMAIN_CONF
))
3501 mutex_lock(&wl
->mutex
);
3503 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3506 ret
= wl1271_ps_elp_wakeup(wl
);
3510 ret
= wlcore_cmd_regdomain_config_locked(wl
);
3512 wl12xx_queue_recovery_work(wl
);
3516 wl1271_ps_elp_sleep(wl
);
3518 mutex_unlock(&wl
->mutex
);
3521 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3522 struct ieee80211_vif
*vif
,
3523 struct cfg80211_scan_request
*req
)
3525 struct wl1271
*wl
= hw
->priv
;
3530 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3533 ssid
= req
->ssids
[0].ssid
;
3534 len
= req
->ssids
[0].ssid_len
;
3537 mutex_lock(&wl
->mutex
);
3539 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3541 * We cannot return -EBUSY here because cfg80211 will expect
3542 * a call to ieee80211_scan_completed if we do - in this case
3543 * there won't be any call.
3549 ret
= wl1271_ps_elp_wakeup(wl
);
3553 /* fail if there is any role in ROC */
3554 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3555 /* don't allow scanning right now */
3560 ret
= wlcore_scan(hw
->priv
, vif
, ssid
, len
, req
);
3562 wl1271_ps_elp_sleep(wl
);
3564 mutex_unlock(&wl
->mutex
);
3569 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3570 struct ieee80211_vif
*vif
)
3572 struct wl1271
*wl
= hw
->priv
;
3573 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3576 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3578 mutex_lock(&wl
->mutex
);
3580 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3583 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3586 ret
= wl1271_ps_elp_wakeup(wl
);
3590 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3591 ret
= wl
->ops
->scan_stop(wl
, wlvif
);
3597 * Rearm the tx watchdog just before idling scan. This
3598 * prevents just-finished scans from triggering the watchdog
3600 wl12xx_rearm_tx_watchdog_locked(wl
);
3602 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3603 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3604 wl
->scan_wlvif
= NULL
;
3605 wl
->scan
.req
= NULL
;
3606 ieee80211_scan_completed(wl
->hw
, true);
3609 wl1271_ps_elp_sleep(wl
);
3611 mutex_unlock(&wl
->mutex
);
3613 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3616 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3617 struct ieee80211_vif
*vif
,
3618 struct cfg80211_sched_scan_request
*req
,
3619 struct ieee80211_sched_scan_ies
*ies
)
3621 struct wl1271
*wl
= hw
->priv
;
3622 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3625 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3627 mutex_lock(&wl
->mutex
);
3629 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3634 ret
= wl1271_ps_elp_wakeup(wl
);
3638 ret
= wl
->ops
->sched_scan_start(wl
, wlvif
, req
, ies
);
3642 wl
->sched_vif
= wlvif
;
3645 wl1271_ps_elp_sleep(wl
);
3647 mutex_unlock(&wl
->mutex
);
3651 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3652 struct ieee80211_vif
*vif
)
3654 struct wl1271
*wl
= hw
->priv
;
3655 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3658 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3660 mutex_lock(&wl
->mutex
);
3662 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3665 ret
= wl1271_ps_elp_wakeup(wl
);
3669 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3671 wl1271_ps_elp_sleep(wl
);
3673 mutex_unlock(&wl
->mutex
);
3676 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3678 struct wl1271
*wl
= hw
->priv
;
3681 mutex_lock(&wl
->mutex
);
3683 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3688 ret
= wl1271_ps_elp_wakeup(wl
);
3692 ret
= wl1271_acx_frag_threshold(wl
, value
);
3694 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3696 wl1271_ps_elp_sleep(wl
);
3699 mutex_unlock(&wl
->mutex
);
3704 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3706 struct wl1271
*wl
= hw
->priv
;
3707 struct wl12xx_vif
*wlvif
;
3710 mutex_lock(&wl
->mutex
);
3712 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3717 ret
= wl1271_ps_elp_wakeup(wl
);
3721 wl12xx_for_each_wlvif(wl
, wlvif
) {
3722 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3724 wl1271_warning("set rts threshold failed: %d", ret
);
3726 wl1271_ps_elp_sleep(wl
);
3729 mutex_unlock(&wl
->mutex
);
3734 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3737 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3738 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3739 skb
->len
- ieoffset
);
3744 memmove(ie
, next
, end
- next
);
3745 skb_trim(skb
, skb
->len
- len
);
3748 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3749 unsigned int oui
, u8 oui_type
,
3753 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3754 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3755 skb
->data
+ ieoffset
,
3756 skb
->len
- ieoffset
);
3761 memmove(ie
, next
, end
- next
);
3762 skb_trim(skb
, skb
->len
- len
);
3765 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3766 struct ieee80211_vif
*vif
)
3768 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3769 struct sk_buff
*skb
;
3772 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3776 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3777 CMD_TEMPL_AP_PROBE_RESPONSE
,
3786 wl1271_debug(DEBUG_AP
, "probe response updated");
3787 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3793 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3794 struct ieee80211_vif
*vif
,
3796 size_t probe_rsp_len
,
3799 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3800 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3801 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3802 int ssid_ie_offset
, ie_offset
, templ_len
;
3805 /* no need to change probe response if the SSID is set correctly */
3806 if (wlvif
->ssid_len
> 0)
3807 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3808 CMD_TEMPL_AP_PROBE_RESPONSE
,
3813 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3814 wl1271_error("probe_rsp template too big");
3818 /* start searching from IE offset */
3819 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3821 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3822 probe_rsp_len
- ie_offset
);
3824 wl1271_error("No SSID in beacon!");
3828 ssid_ie_offset
= ptr
- probe_rsp_data
;
3829 ptr
+= (ptr
[1] + 2);
3831 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3833 /* insert SSID from bss_conf */
3834 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3835 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3836 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3837 bss_conf
->ssid
, bss_conf
->ssid_len
);
3838 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3840 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3841 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3842 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3844 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3845 CMD_TEMPL_AP_PROBE_RESPONSE
,
3851 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3852 struct ieee80211_vif
*vif
,
3853 struct ieee80211_bss_conf
*bss_conf
,
3856 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3859 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3860 if (bss_conf
->use_short_slot
)
3861 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3863 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3865 wl1271_warning("Set slot time failed %d", ret
);
3870 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3871 if (bss_conf
->use_short_preamble
)
3872 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3874 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3877 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3878 if (bss_conf
->use_cts_prot
)
3879 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3882 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3883 CTSPROTECT_DISABLE
);
3885 wl1271_warning("Set ctsprotect failed %d", ret
);
3894 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3895 struct ieee80211_vif
*vif
,
3898 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3899 struct ieee80211_hdr
*hdr
;
3902 int ieoffset
= offsetof(struct ieee80211_mgmt
, u
.beacon
.variable
);
3903 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3911 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3913 ret
= wl1271_ssid_set(wlvif
, beacon
, ieoffset
);
3915 dev_kfree_skb(beacon
);
3918 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3919 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3921 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3926 dev_kfree_skb(beacon
);
3930 wlvif
->wmm_enabled
=
3931 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT
,
3932 WLAN_OUI_TYPE_MICROSOFT_WMM
,
3933 beacon
->data
+ ieoffset
,
3934 beacon
->len
- ieoffset
);
3937 * In case we already have a probe-resp beacon set explicitly
3938 * by usermode, don't use the beacon data.
3940 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3943 /* remove TIM ie from probe response */
3944 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3947 * remove p2p ie from probe response.
3948 * the fw responds to probe requests that don't include
3949 * the p2p ie. probe requests with p2p ie will be passed,
3950 * and will be responded by the supplicant (the spec
3951 * forbids including the p2p ie when responding to probe
3952 * requests that didn't include it).
3954 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3955 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3957 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3958 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3959 IEEE80211_STYPE_PROBE_RESP
);
3961 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3966 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3967 CMD_TEMPL_PROBE_RESPONSE
,
3972 dev_kfree_skb(beacon
);
3980 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3981 struct ieee80211_vif
*vif
,
3982 struct ieee80211_bss_conf
*bss_conf
,
3985 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3986 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3989 if (changed
& BSS_CHANGED_BEACON_INT
) {
3990 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3991 bss_conf
->beacon_int
);
3993 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3996 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3997 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3999 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
4002 if (changed
& BSS_CHANGED_BEACON
) {
4003 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
4010 wl1271_error("beacon info change failed: %d", ret
);
4014 /* AP mode changes */
4015 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
4016 struct ieee80211_vif
*vif
,
4017 struct ieee80211_bss_conf
*bss_conf
,
4020 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4023 if (changed
& BSS_CHANGED_BASIC_RATES
) {
4024 u32 rates
= bss_conf
->basic_rates
;
4026 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
4028 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
4029 wlvif
->basic_rate_set
);
4031 ret
= wl1271_init_ap_rates(wl
, wlvif
);
4033 wl1271_error("AP rate policy change failed %d", ret
);
4037 ret
= wl1271_ap_init_templates(wl
, vif
);
4041 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
4045 ret
= wlcore_set_beacon_template(wl
, vif
, true);
4050 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
4054 if (changed
& BSS_CHANGED_BEACON_ENABLED
) {
4055 if (bss_conf
->enable_beacon
) {
4056 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4057 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
4061 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
4065 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4066 wl1271_debug(DEBUG_AP
, "started AP");
4069 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4071 * AP might be in ROC in case we have just
4072 * sent auth reply. handle it.
4074 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4075 wl12xx_croc(wl
, wlvif
->role_id
);
4077 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
4081 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4082 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
4084 wl1271_debug(DEBUG_AP
, "stopped AP");
4089 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4093 /* Handle HT information change */
4094 if ((changed
& BSS_CHANGED_HT
) &&
4095 (bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
)) {
4096 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4097 bss_conf
->ht_operation_mode
);
4099 wl1271_warning("Set ht information failed %d", ret
);
4108 static int wlcore_set_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4109 struct ieee80211_bss_conf
*bss_conf
,
4115 wl1271_debug(DEBUG_MAC80211
,
4116 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4117 bss_conf
->bssid
, bss_conf
->aid
,
4118 bss_conf
->beacon_int
,
4119 bss_conf
->basic_rates
, sta_rate_set
);
4121 wlvif
->beacon_int
= bss_conf
->beacon_int
;
4122 rates
= bss_conf
->basic_rates
;
4123 wlvif
->basic_rate_set
=
4124 wl1271_tx_enabled_rates_get(wl
, rates
,
4127 wl1271_tx_min_rate_get(wl
,
4128 wlvif
->basic_rate_set
);
4132 wl1271_tx_enabled_rates_get(wl
,
4136 /* we only support sched_scan while not connected */
4137 if (wl
->sched_vif
== wlvif
)
4138 wl
->ops
->sched_scan_stop(wl
, wlvif
);
4140 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4144 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
4148 ret
= wl1271_build_qos_null_data(wl
, wl12xx_wlvif_to_vif(wlvif
));
4152 wlcore_set_ssid(wl
, wlvif
);
4154 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4159 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4163 /* revert back to minimum rates for the current band */
4164 wl1271_set_band_rate(wl
, wlvif
);
4165 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4167 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4171 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4172 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4173 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4178 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4181 /* STA/IBSS mode changes */
4182 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4183 struct ieee80211_vif
*vif
,
4184 struct ieee80211_bss_conf
*bss_conf
,
4187 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4188 bool do_join
= false;
4189 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4190 bool ibss_joined
= false;
4191 u32 sta_rate_set
= 0;
4193 struct ieee80211_sta
*sta
;
4194 bool sta_exists
= false;
4195 struct ieee80211_sta_ht_cap sta_ht_cap
;
4198 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4204 if (changed
& BSS_CHANGED_IBSS
) {
4205 if (bss_conf
->ibss_joined
) {
4206 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4209 wlcore_unset_assoc(wl
, wlvif
);
4210 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4214 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4217 /* Need to update the SSID (for filtering etc) */
4218 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4221 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4222 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4223 bss_conf
->enable_beacon
? "enabled" : "disabled");
4228 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
)
4229 wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
4231 if (changed
& BSS_CHANGED_CQM
) {
4232 bool enable
= false;
4233 if (bss_conf
->cqm_rssi_thold
)
4235 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4236 bss_conf
->cqm_rssi_thold
,
4237 bss_conf
->cqm_rssi_hyst
);
4240 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4243 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4244 BSS_CHANGED_ASSOC
)) {
4246 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4248 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4250 /* save the supp_rates of the ap */
4251 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4252 if (sta
->ht_cap
.ht_supported
)
4254 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4255 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4256 sta_ht_cap
= sta
->ht_cap
;
4263 if (changed
& BSS_CHANGED_BSSID
) {
4264 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4265 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4270 /* Need to update the BSSID (for filtering etc) */
4273 ret
= wlcore_clear_bssid(wl
, wlvif
);
4279 if (changed
& BSS_CHANGED_IBSS
) {
4280 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4281 bss_conf
->ibss_joined
);
4283 if (bss_conf
->ibss_joined
) {
4284 u32 rates
= bss_conf
->basic_rates
;
4285 wlvif
->basic_rate_set
=
4286 wl1271_tx_enabled_rates_get(wl
, rates
,
4289 wl1271_tx_min_rate_get(wl
,
4290 wlvif
->basic_rate_set
);
4292 /* by default, use 11b + OFDM rates */
4293 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4294 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4300 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4305 ret
= wlcore_join(wl
, wlvif
);
4307 wl1271_warning("cmd join failed %d", ret
);
4312 if (changed
& BSS_CHANGED_ASSOC
) {
4313 if (bss_conf
->assoc
) {
4314 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4319 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4320 wl12xx_set_authorized(wl
, wlvif
);
4322 wlcore_unset_assoc(wl
, wlvif
);
4326 if (changed
& BSS_CHANGED_PS
) {
4327 if ((bss_conf
->ps
) &&
4328 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4329 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4333 if (wl
->conf
.conn
.forced_ps
) {
4334 ps_mode
= STATION_POWER_SAVE_MODE
;
4335 ps_mode_str
= "forced";
4337 ps_mode
= STATION_AUTO_PS_MODE
;
4338 ps_mode_str
= "auto";
4341 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4343 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4345 wl1271_warning("enter %s ps failed %d",
4347 } else if (!bss_conf
->ps
&&
4348 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4349 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4351 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4352 STATION_ACTIVE_MODE
);
4354 wl1271_warning("exit auto ps failed %d", ret
);
4358 /* Handle new association with HT. Do this after join. */
4361 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4363 ret
= wlcore_hw_set_peer_cap(wl
,
4369 wl1271_warning("Set ht cap failed %d", ret
);
4375 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4376 bss_conf
->ht_operation_mode
);
4378 wl1271_warning("Set ht information failed %d",
4385 /* Handle arp filtering. Done after join. */
4386 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4387 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4388 __be32 addr
= bss_conf
->arp_addr_list
[0];
4389 wlvif
->sta
.qos
= bss_conf
->qos
;
4390 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4392 if (bss_conf
->arp_addr_cnt
== 1 && bss_conf
->assoc
) {
4393 wlvif
->ip_addr
= addr
;
4395 * The template should have been configured only upon
4396 * association. however, it seems that the correct ip
4397 * isn't being set (when sending), so we have to
4398 * reconfigure the template upon every ip change.
4400 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4402 wl1271_warning("build arp rsp failed: %d", ret
);
4406 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4407 (ACX_ARP_FILTER_ARP_FILTERING
|
4408 ACX_ARP_FILTER_AUTO_ARP
),
4412 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4423 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4424 struct ieee80211_vif
*vif
,
4425 struct ieee80211_bss_conf
*bss_conf
,
4428 struct wl1271
*wl
= hw
->priv
;
4429 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4430 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4433 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4434 wlvif
->role_id
, (int)changed
);
4437 * make sure to cancel pending disconnections if our association
4440 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4441 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4443 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4444 !bss_conf
->enable_beacon
)
4445 wl1271_tx_flush(wl
);
4447 mutex_lock(&wl
->mutex
);
4449 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4452 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4455 ret
= wl1271_ps_elp_wakeup(wl
);
4460 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4462 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4464 wl1271_ps_elp_sleep(wl
);
4467 mutex_unlock(&wl
->mutex
);
4470 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4471 struct ieee80211_chanctx_conf
*ctx
)
4473 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4474 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4475 cfg80211_get_chandef_type(&ctx
->def
));
4479 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4480 struct ieee80211_chanctx_conf
*ctx
)
4482 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4483 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4484 cfg80211_get_chandef_type(&ctx
->def
));
4487 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4488 struct ieee80211_chanctx_conf
*ctx
,
4491 wl1271_debug(DEBUG_MAC80211
,
4492 "mac80211 change chanctx %d (type %d) changed 0x%x",
4493 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4494 cfg80211_get_chandef_type(&ctx
->def
), changed
);
4497 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4498 struct ieee80211_vif
*vif
,
4499 struct ieee80211_chanctx_conf
*ctx
)
4501 struct wl1271
*wl
= hw
->priv
;
4502 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4503 int channel
= ieee80211_frequency_to_channel(
4504 ctx
->def
.chan
->center_freq
);
4506 wl1271_debug(DEBUG_MAC80211
,
4507 "mac80211 assign chanctx (role %d) %d (type %d)",
4508 wlvif
->role_id
, channel
, cfg80211_get_chandef_type(&ctx
->def
));
4510 mutex_lock(&wl
->mutex
);
4512 wlvif
->band
= ctx
->def
.chan
->band
;
4513 wlvif
->channel
= channel
;
4514 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4516 /* update default rates according to the band */
4517 wl1271_set_band_rate(wl
, wlvif
);
4519 mutex_unlock(&wl
->mutex
);
4524 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4525 struct ieee80211_vif
*vif
,
4526 struct ieee80211_chanctx_conf
*ctx
)
4528 struct wl1271
*wl
= hw
->priv
;
4529 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4531 wl1271_debug(DEBUG_MAC80211
,
4532 "mac80211 unassign chanctx (role %d) %d (type %d)",
4534 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4535 cfg80211_get_chandef_type(&ctx
->def
));
4537 wl1271_tx_flush(wl
);
4540 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4541 struct ieee80211_vif
*vif
, u16 queue
,
4542 const struct ieee80211_tx_queue_params
*params
)
4544 struct wl1271
*wl
= hw
->priv
;
4545 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4549 mutex_lock(&wl
->mutex
);
4551 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4554 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4556 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4558 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4561 ret
= wl1271_ps_elp_wakeup(wl
);
4566 * the txop is confed in units of 32us by the mac80211,
4569 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4570 params
->cw_min
, params
->cw_max
,
4571 params
->aifs
, params
->txop
<< 5);
4575 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4576 CONF_CHANNEL_TYPE_EDCF
,
4577 wl1271_tx_get_queue(queue
),
4578 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4582 wl1271_ps_elp_sleep(wl
);
4585 mutex_unlock(&wl
->mutex
);
4590 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4591 struct ieee80211_vif
*vif
)
4594 struct wl1271
*wl
= hw
->priv
;
4595 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4596 u64 mactime
= ULLONG_MAX
;
4599 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4601 mutex_lock(&wl
->mutex
);
4603 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4606 ret
= wl1271_ps_elp_wakeup(wl
);
4610 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4615 wl1271_ps_elp_sleep(wl
);
4618 mutex_unlock(&wl
->mutex
);
4622 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4623 struct survey_info
*survey
)
4625 struct ieee80211_conf
*conf
= &hw
->conf
;
4630 survey
->channel
= conf
->chandef
.chan
;
4635 static int wl1271_allocate_sta(struct wl1271
*wl
,
4636 struct wl12xx_vif
*wlvif
,
4637 struct ieee80211_sta
*sta
)
4639 struct wl1271_station
*wl_sta
;
4643 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4644 wl1271_warning("could not allocate HLID - too much stations");
4648 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4649 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4651 wl1271_warning("could not allocate HLID - too many links");
4655 /* use the previous security seq, if this is a recovery/resume */
4656 wl
->links
[wl_sta
->hlid
].total_freed_pkts
= wl_sta
->total_freed_pkts
;
4658 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4659 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4660 wl
->active_sta_count
++;
4664 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4666 struct wl1271_station
*wl_sta
;
4667 struct ieee80211_sta
*sta
;
4668 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4670 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4673 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4674 __clear_bit(hlid
, &wl
->ap_ps_map
);
4675 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4678 * save the last used PN in the private part of iee80211_sta,
4679 * in case of recovery/suspend
4682 sta
= ieee80211_find_sta(vif
, wl
->links
[hlid
].addr
);
4684 wl_sta
= (void *)sta
->drv_priv
;
4685 wl_sta
->total_freed_pkts
= wl
->links
[hlid
].total_freed_pkts
;
4688 * increment the initial seq number on recovery to account for
4689 * transmitted packets that we haven't yet got in the FW status
4691 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
4692 wl_sta
->total_freed_pkts
+=
4693 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
4697 wl12xx_free_link(wl
, wlvif
, &hlid
);
4698 wl
->active_sta_count
--;
4701 * rearm the tx watchdog when the last STA is freed - give the FW a
4702 * chance to return STA-buffered packets before complaining.
4704 if (wl
->active_sta_count
== 0)
4705 wl12xx_rearm_tx_watchdog_locked(wl
);
4708 static int wl12xx_sta_add(struct wl1271
*wl
,
4709 struct wl12xx_vif
*wlvif
,
4710 struct ieee80211_sta
*sta
)
4712 struct wl1271_station
*wl_sta
;
4716 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4718 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4722 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4723 hlid
= wl_sta
->hlid
;
4725 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4727 wl1271_free_sta(wl
, wlvif
, hlid
);
4732 static int wl12xx_sta_remove(struct wl1271
*wl
,
4733 struct wl12xx_vif
*wlvif
,
4734 struct ieee80211_sta
*sta
)
4736 struct wl1271_station
*wl_sta
;
4739 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4741 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4743 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4746 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4750 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4754 static void wlcore_roc_if_possible(struct wl1271
*wl
,
4755 struct wl12xx_vif
*wlvif
)
4757 if (find_first_bit(wl
->roc_map
,
4758 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
4761 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
4764 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
4768 * when wl_sta is NULL, we treat this call as if coming from a
4769 * pending auth reply.
4770 * wl->mutex must be taken and the FW must be awake when the call
4773 void wlcore_update_inconn_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4774 struct wl1271_station
*wl_sta
, bool in_conn
)
4777 if (WARN_ON(wl_sta
&& wl_sta
->in_connection
))
4780 if (!wlvif
->ap_pending_auth_reply
&&
4781 !wlvif
->inconn_count
)
4782 wlcore_roc_if_possible(wl
, wlvif
);
4785 wl_sta
->in_connection
= true;
4786 wlvif
->inconn_count
++;
4788 wlvif
->ap_pending_auth_reply
= true;
4791 if (wl_sta
&& !wl_sta
->in_connection
)
4794 if (WARN_ON(!wl_sta
&& !wlvif
->ap_pending_auth_reply
))
4797 if (WARN_ON(wl_sta
&& !wlvif
->inconn_count
))
4801 wl_sta
->in_connection
= false;
4802 wlvif
->inconn_count
--;
4804 wlvif
->ap_pending_auth_reply
= false;
4807 if (!wlvif
->inconn_count
&& !wlvif
->ap_pending_auth_reply
&&
4808 test_bit(wlvif
->role_id
, wl
->roc_map
))
4809 wl12xx_croc(wl
, wlvif
->role_id
);
4813 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4814 struct wl12xx_vif
*wlvif
,
4815 struct ieee80211_sta
*sta
,
4816 enum ieee80211_sta_state old_state
,
4817 enum ieee80211_sta_state new_state
)
4819 struct wl1271_station
*wl_sta
;
4820 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4821 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4824 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4826 /* Add station (AP mode) */
4828 old_state
== IEEE80211_STA_NOTEXIST
&&
4829 new_state
== IEEE80211_STA_NONE
) {
4830 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
4834 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
4837 /* Remove station (AP mode) */
4839 old_state
== IEEE80211_STA_NONE
&&
4840 new_state
== IEEE80211_STA_NOTEXIST
) {
4842 wl12xx_sta_remove(wl
, wlvif
, sta
);
4844 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4847 /* Authorize station (AP mode) */
4849 new_state
== IEEE80211_STA_AUTHORIZED
) {
4850 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wl_sta
->hlid
);
4854 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4859 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4862 /* Authorize station */
4864 new_state
== IEEE80211_STA_AUTHORIZED
) {
4865 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4866 ret
= wl12xx_set_authorized(wl
, wlvif
);
4872 old_state
== IEEE80211_STA_AUTHORIZED
&&
4873 new_state
== IEEE80211_STA_ASSOC
) {
4874 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4875 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
4878 /* clear ROCs on failure or authorization */
4880 (new_state
== IEEE80211_STA_AUTHORIZED
||
4881 new_state
== IEEE80211_STA_NOTEXIST
)) {
4882 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4883 wl12xx_croc(wl
, wlvif
->role_id
);
4887 old_state
== IEEE80211_STA_NOTEXIST
&&
4888 new_state
== IEEE80211_STA_NONE
) {
4889 if (find_first_bit(wl
->roc_map
,
4890 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
4891 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
4892 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
4893 wlvif
->band
, wlvif
->channel
);
4899 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4900 struct ieee80211_vif
*vif
,
4901 struct ieee80211_sta
*sta
,
4902 enum ieee80211_sta_state old_state
,
4903 enum ieee80211_sta_state new_state
)
4905 struct wl1271
*wl
= hw
->priv
;
4906 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4909 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4910 sta
->aid
, old_state
, new_state
);
4912 mutex_lock(&wl
->mutex
);
4914 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4919 ret
= wl1271_ps_elp_wakeup(wl
);
4923 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4925 wl1271_ps_elp_sleep(wl
);
4927 mutex_unlock(&wl
->mutex
);
4928 if (new_state
< old_state
)
4933 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4934 struct ieee80211_vif
*vif
,
4935 enum ieee80211_ampdu_mlme_action action
,
4936 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4939 struct wl1271
*wl
= hw
->priv
;
4940 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4942 u8 hlid
, *ba_bitmap
;
4944 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4947 /* sanity check - the fields in FW are only 8bits wide */
4948 if (WARN_ON(tid
> 0xFF))
4951 mutex_lock(&wl
->mutex
);
4953 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4958 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4959 hlid
= wlvif
->sta
.hlid
;
4960 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4961 struct wl1271_station
*wl_sta
;
4963 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4964 hlid
= wl_sta
->hlid
;
4970 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4972 ret
= wl1271_ps_elp_wakeup(wl
);
4976 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4980 case IEEE80211_AMPDU_RX_START
:
4981 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4986 if (wl
->ba_rx_session_count
>= wl
->ba_rx_session_count_max
) {
4988 wl1271_error("exceeded max RX BA sessions");
4992 if (*ba_bitmap
& BIT(tid
)) {
4994 wl1271_error("cannot enable RX BA session on active "
4999 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
5002 *ba_bitmap
|= BIT(tid
);
5003 wl
->ba_rx_session_count
++;
5007 case IEEE80211_AMPDU_RX_STOP
:
5008 if (!(*ba_bitmap
& BIT(tid
))) {
5010 * this happens on reconfig - so only output a debug
5011 * message for now, and don't fail the function.
5013 wl1271_debug(DEBUG_MAC80211
,
5014 "no active RX BA session on tid: %d",
5020 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
5023 *ba_bitmap
&= ~BIT(tid
);
5024 wl
->ba_rx_session_count
--;
5029 * The BA initiator session management in FW independently.
5030 * Falling break here on purpose for all TX APDU commands.
5032 case IEEE80211_AMPDU_TX_START
:
5033 case IEEE80211_AMPDU_TX_STOP_CONT
:
5034 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
5035 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
5036 case IEEE80211_AMPDU_TX_OPERATIONAL
:
5041 wl1271_error("Incorrect ampdu action id=%x\n", action
);
5045 wl1271_ps_elp_sleep(wl
);
5048 mutex_unlock(&wl
->mutex
);
5053 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
5054 struct ieee80211_vif
*vif
,
5055 const struct cfg80211_bitrate_mask
*mask
)
5057 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5058 struct wl1271
*wl
= hw
->priv
;
5061 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
5062 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
5063 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
5065 mutex_lock(&wl
->mutex
);
5067 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
5068 wlvif
->bitrate_masks
[i
] =
5069 wl1271_tx_enabled_rates_get(wl
,
5070 mask
->control
[i
].legacy
,
5073 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5076 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
5077 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5079 ret
= wl1271_ps_elp_wakeup(wl
);
5083 wl1271_set_band_rate(wl
, wlvif
);
5085 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
5086 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
5088 wl1271_ps_elp_sleep(wl
);
5091 mutex_unlock(&wl
->mutex
);
5096 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
5097 struct ieee80211_channel_switch
*ch_switch
)
5099 struct wl1271
*wl
= hw
->priv
;
5100 struct wl12xx_vif
*wlvif
;
5103 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
5105 wl1271_tx_flush(wl
);
5107 mutex_lock(&wl
->mutex
);
5109 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
5110 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5111 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
5112 ieee80211_chswitch_done(vif
, false);
5115 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5119 ret
= wl1271_ps_elp_wakeup(wl
);
5123 /* TODO: change mac80211 to pass vif as param */
5124 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5125 unsigned long delay_usec
;
5127 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
5131 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5133 /* indicate failure 5 seconds after channel switch time */
5134 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
5136 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
5137 usecs_to_jiffies(delay_usec
) +
5138 msecs_to_jiffies(5000));
5142 wl1271_ps_elp_sleep(wl
);
5145 mutex_unlock(&wl
->mutex
);
5148 static void wlcore_op_flush(struct ieee80211_hw
*hw
, u32 queues
, bool drop
)
5150 struct wl1271
*wl
= hw
->priv
;
5152 wl1271_tx_flush(wl
);
5155 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
5156 struct ieee80211_vif
*vif
,
5157 struct ieee80211_channel
*chan
,
5159 enum ieee80211_roc_type type
)
5161 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5162 struct wl1271
*wl
= hw
->priv
;
5163 int channel
, ret
= 0;
5165 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
5167 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
5168 channel
, wlvif
->role_id
);
5170 mutex_lock(&wl
->mutex
);
5172 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5175 /* return EBUSY if we can't ROC right now */
5176 if (WARN_ON(wl
->roc_vif
||
5177 find_first_bit(wl
->roc_map
,
5178 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)) {
5183 ret
= wl1271_ps_elp_wakeup(wl
);
5187 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
5192 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
5193 msecs_to_jiffies(duration
));
5195 wl1271_ps_elp_sleep(wl
);
5197 mutex_unlock(&wl
->mutex
);
5201 static int __wlcore_roc_completed(struct wl1271
*wl
)
5203 struct wl12xx_vif
*wlvif
;
5206 /* already completed */
5207 if (unlikely(!wl
->roc_vif
))
5210 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5212 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5215 ret
= wl12xx_stop_dev(wl
, wlvif
);
5224 static int wlcore_roc_completed(struct wl1271
*wl
)
5228 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5230 mutex_lock(&wl
->mutex
);
5232 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5237 ret
= wl1271_ps_elp_wakeup(wl
);
5241 ret
= __wlcore_roc_completed(wl
);
5243 wl1271_ps_elp_sleep(wl
);
5245 mutex_unlock(&wl
->mutex
);
5250 static void wlcore_roc_complete_work(struct work_struct
*work
)
5252 struct delayed_work
*dwork
;
5256 dwork
= container_of(work
, struct delayed_work
, work
);
5257 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5259 ret
= wlcore_roc_completed(wl
);
5261 ieee80211_remain_on_channel_expired(wl
->hw
);
5264 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5266 struct wl1271
*wl
= hw
->priv
;
5268 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5271 wl1271_tx_flush(wl
);
5274 * we can't just flush_work here, because it might deadlock
5275 * (as we might get called from the same workqueue)
5277 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5278 wlcore_roc_completed(wl
);
5283 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5284 struct ieee80211_vif
*vif
,
5285 struct ieee80211_sta
*sta
,
5288 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5289 struct wl1271
*wl
= hw
->priv
;
5291 wlcore_hw_sta_rc_update(wl
, wlvif
, sta
, changed
);
5294 static int wlcore_op_get_rssi(struct ieee80211_hw
*hw
,
5295 struct ieee80211_vif
*vif
,
5296 struct ieee80211_sta
*sta
,
5299 struct wl1271
*wl
= hw
->priv
;
5300 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5303 wl1271_debug(DEBUG_MAC80211
, "mac80211 get_rssi");
5305 mutex_lock(&wl
->mutex
);
5307 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5310 ret
= wl1271_ps_elp_wakeup(wl
);
5314 ret
= wlcore_acx_average_rssi(wl
, wlvif
, rssi_dbm
);
5319 wl1271_ps_elp_sleep(wl
);
5322 mutex_unlock(&wl
->mutex
);
5327 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5329 struct wl1271
*wl
= hw
->priv
;
5332 mutex_lock(&wl
->mutex
);
5334 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5337 /* packets are considered pending if in the TX queue or the FW */
5338 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5340 mutex_unlock(&wl
->mutex
);
5345 /* can't be const, mac80211 writes to this */
5346 static struct ieee80211_rate wl1271_rates
[] = {
5348 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5349 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5351 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5352 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5353 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5355 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5356 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5357 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5359 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5360 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5361 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5363 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5364 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5366 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5367 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5369 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5370 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5372 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5373 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5375 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5376 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5378 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5379 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5381 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5382 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5384 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5385 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5388 /* can't be const, mac80211 writes to this */
5389 static struct ieee80211_channel wl1271_channels
[] = {
5390 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5391 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5392 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5393 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5394 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5395 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5396 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5397 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5398 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5399 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5400 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5401 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5402 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5403 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5406 /* can't be const, mac80211 writes to this */
5407 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5408 .channels
= wl1271_channels
,
5409 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5410 .bitrates
= wl1271_rates
,
5411 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5414 /* 5 GHz data rates for WL1273 */
5415 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5417 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5418 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5420 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5421 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5423 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5424 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5426 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5427 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5429 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5430 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5432 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5433 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5435 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5436 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5438 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5439 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5442 /* 5 GHz band channels for WL1273 */
5443 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
5444 { .hw_value
= 8, .center_freq
= 5040, .max_power
= WLCORE_MAX_TXPWR
},
5445 { .hw_value
= 12, .center_freq
= 5060, .max_power
= WLCORE_MAX_TXPWR
},
5446 { .hw_value
= 16, .center_freq
= 5080, .max_power
= WLCORE_MAX_TXPWR
},
5447 { .hw_value
= 34, .center_freq
= 5170, .max_power
= WLCORE_MAX_TXPWR
},
5448 { .hw_value
= 36, .center_freq
= 5180, .max_power
= WLCORE_MAX_TXPWR
},
5449 { .hw_value
= 38, .center_freq
= 5190, .max_power
= WLCORE_MAX_TXPWR
},
5450 { .hw_value
= 40, .center_freq
= 5200, .max_power
= WLCORE_MAX_TXPWR
},
5451 { .hw_value
= 42, .center_freq
= 5210, .max_power
= WLCORE_MAX_TXPWR
},
5452 { .hw_value
= 44, .center_freq
= 5220, .max_power
= WLCORE_MAX_TXPWR
},
5453 { .hw_value
= 46, .center_freq
= 5230, .max_power
= WLCORE_MAX_TXPWR
},
5454 { .hw_value
= 48, .center_freq
= 5240, .max_power
= WLCORE_MAX_TXPWR
},
5455 { .hw_value
= 52, .center_freq
= 5260, .max_power
= WLCORE_MAX_TXPWR
},
5456 { .hw_value
= 56, .center_freq
= 5280, .max_power
= WLCORE_MAX_TXPWR
},
5457 { .hw_value
= 60, .center_freq
= 5300, .max_power
= WLCORE_MAX_TXPWR
},
5458 { .hw_value
= 64, .center_freq
= 5320, .max_power
= WLCORE_MAX_TXPWR
},
5459 { .hw_value
= 100, .center_freq
= 5500, .max_power
= WLCORE_MAX_TXPWR
},
5460 { .hw_value
= 104, .center_freq
= 5520, .max_power
= WLCORE_MAX_TXPWR
},
5461 { .hw_value
= 108, .center_freq
= 5540, .max_power
= WLCORE_MAX_TXPWR
},
5462 { .hw_value
= 112, .center_freq
= 5560, .max_power
= WLCORE_MAX_TXPWR
},
5463 { .hw_value
= 116, .center_freq
= 5580, .max_power
= WLCORE_MAX_TXPWR
},
5464 { .hw_value
= 120, .center_freq
= 5600, .max_power
= WLCORE_MAX_TXPWR
},
5465 { .hw_value
= 124, .center_freq
= 5620, .max_power
= WLCORE_MAX_TXPWR
},
5466 { .hw_value
= 128, .center_freq
= 5640, .max_power
= WLCORE_MAX_TXPWR
},
5467 { .hw_value
= 132, .center_freq
= 5660, .max_power
= WLCORE_MAX_TXPWR
},
5468 { .hw_value
= 136, .center_freq
= 5680, .max_power
= WLCORE_MAX_TXPWR
},
5469 { .hw_value
= 140, .center_freq
= 5700, .max_power
= WLCORE_MAX_TXPWR
},
5470 { .hw_value
= 149, .center_freq
= 5745, .max_power
= WLCORE_MAX_TXPWR
},
5471 { .hw_value
= 153, .center_freq
= 5765, .max_power
= WLCORE_MAX_TXPWR
},
5472 { .hw_value
= 157, .center_freq
= 5785, .max_power
= WLCORE_MAX_TXPWR
},
5473 { .hw_value
= 161, .center_freq
= 5805, .max_power
= WLCORE_MAX_TXPWR
},
5474 { .hw_value
= 165, .center_freq
= 5825, .max_power
= WLCORE_MAX_TXPWR
},
5477 static struct ieee80211_supported_band wl1271_band_5ghz
= {
5478 .channels
= wl1271_channels_5ghz
,
5479 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
5480 .bitrates
= wl1271_rates_5ghz
,
5481 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
5484 static const struct ieee80211_ops wl1271_ops
= {
5485 .start
= wl1271_op_start
,
5486 .stop
= wlcore_op_stop
,
5487 .add_interface
= wl1271_op_add_interface
,
5488 .remove_interface
= wl1271_op_remove_interface
,
5489 .change_interface
= wl12xx_op_change_interface
,
5491 .suspend
= wl1271_op_suspend
,
5492 .resume
= wl1271_op_resume
,
5494 .config
= wl1271_op_config
,
5495 .prepare_multicast
= wl1271_op_prepare_multicast
,
5496 .configure_filter
= wl1271_op_configure_filter
,
5498 .set_key
= wlcore_op_set_key
,
5499 .hw_scan
= wl1271_op_hw_scan
,
5500 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
5501 .sched_scan_start
= wl1271_op_sched_scan_start
,
5502 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
5503 .bss_info_changed
= wl1271_op_bss_info_changed
,
5504 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
5505 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
5506 .conf_tx
= wl1271_op_conf_tx
,
5507 .get_tsf
= wl1271_op_get_tsf
,
5508 .get_survey
= wl1271_op_get_survey
,
5509 .sta_state
= wl12xx_op_sta_state
,
5510 .ampdu_action
= wl1271_op_ampdu_action
,
5511 .tx_frames_pending
= wl1271_tx_frames_pending
,
5512 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
5513 .set_default_unicast_key
= wl1271_op_set_default_key_idx
,
5514 .channel_switch
= wl12xx_op_channel_switch
,
5515 .flush
= wlcore_op_flush
,
5516 .remain_on_channel
= wlcore_op_remain_on_channel
,
5517 .cancel_remain_on_channel
= wlcore_op_cancel_remain_on_channel
,
5518 .add_chanctx
= wlcore_op_add_chanctx
,
5519 .remove_chanctx
= wlcore_op_remove_chanctx
,
5520 .change_chanctx
= wlcore_op_change_chanctx
,
5521 .assign_vif_chanctx
= wlcore_op_assign_vif_chanctx
,
5522 .unassign_vif_chanctx
= wlcore_op_unassign_vif_chanctx
,
5523 .sta_rc_update
= wlcore_op_sta_rc_update
,
5524 .get_rssi
= wlcore_op_get_rssi
,
5525 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
5529 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
5535 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5536 wl1271_error("Illegal RX rate from HW: %d", rate
);
5540 idx
= wl
->band_rate_to_idx
[band
][rate
];
5541 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5542 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5549 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5553 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5556 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5557 wl1271_warning("NIC part of the MAC address wraps around!");
5559 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5560 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5561 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5562 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5563 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5564 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5565 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5569 /* we may be one address short at the most */
5570 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5573 * turn on the LAA bit in the first address and use it as
5576 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5577 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5578 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5579 sizeof(wl
->addresses
[0]));
5581 wl
->addresses
[idx
].addr
[2] |= BIT(1);
5584 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5585 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5588 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5592 ret
= wl12xx_set_power_on(wl
);
5596 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5600 wl
->fuse_oui_addr
= 0;
5601 wl
->fuse_nic_addr
= 0;
5603 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5607 if (wl
->ops
->get_mac
)
5608 ret
= wl
->ops
->get_mac(wl
);
5611 wl1271_power_off(wl
);
5615 static int wl1271_register_hw(struct wl1271
*wl
)
5618 u32 oui_addr
= 0, nic_addr
= 0;
5620 if (wl
->mac80211_registered
)
5623 if (wl
->nvs_len
>= 12) {
5624 /* NOTE: The wl->nvs->nvs element must be first, in
5625 * order to simplify the casting, we assume it is at
5626 * the beginning of the wl->nvs structure.
5628 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5631 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5633 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5636 /* if the MAC address is zeroed in the NVS derive from fuse */
5637 if (oui_addr
== 0 && nic_addr
== 0) {
5638 oui_addr
= wl
->fuse_oui_addr
;
5639 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5640 nic_addr
= wl
->fuse_nic_addr
+ 1;
5643 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5645 ret
= ieee80211_register_hw(wl
->hw
);
5647 wl1271_error("unable to register mac80211 hw: %d", ret
);
5651 wl
->mac80211_registered
= true;
5653 wl1271_debugfs_init(wl
);
5655 wl1271_notice("loaded");
5661 static void wl1271_unregister_hw(struct wl1271
*wl
)
5664 wl1271_plt_stop(wl
);
5666 ieee80211_unregister_hw(wl
->hw
);
5667 wl
->mac80211_registered
= false;
5671 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5674 .types
= BIT(NL80211_IFTYPE_STATION
),
5678 .types
= BIT(NL80211_IFTYPE_AP
) |
5679 BIT(NL80211_IFTYPE_P2P_GO
) |
5680 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5684 static struct ieee80211_iface_combination
5685 wlcore_iface_combinations
[] = {
5687 .max_interfaces
= 3,
5688 .limits
= wlcore_iface_limits
,
5689 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5693 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5696 static const u32 cipher_suites
[] = {
5697 WLAN_CIPHER_SUITE_WEP40
,
5698 WLAN_CIPHER_SUITE_WEP104
,
5699 WLAN_CIPHER_SUITE_TKIP
,
5700 WLAN_CIPHER_SUITE_CCMP
,
5701 WL1271_CIPHER_SUITE_GEM
,
5704 /* The tx descriptor buffer */
5705 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5707 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5708 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5711 /* FIXME: find a proper value */
5712 wl
->hw
->channel_change_time
= 10000;
5713 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5715 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5716 IEEE80211_HW_SUPPORTS_PS
|
5717 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5718 IEEE80211_HW_SUPPORTS_UAPSD
|
5719 IEEE80211_HW_HAS_RATE_CONTROL
|
5720 IEEE80211_HW_CONNECTION_MONITOR
|
5721 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5722 IEEE80211_HW_SPECTRUM_MGMT
|
5723 IEEE80211_HW_AP_LINK_PS
|
5724 IEEE80211_HW_AMPDU_AGGREGATION
|
5725 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5726 IEEE80211_HW_QUEUE_CONTROL
;
5728 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5729 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5731 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5732 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5733 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5734 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5735 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5736 wl
->hw
->wiphy
->max_match_sets
= 16;
5738 * Maximum length of elements in scanning probe request templates
5739 * should be the maximum length possible for a template, without
5740 * the IEEE80211 header of the template
5742 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5743 sizeof(struct ieee80211_header
);
5745 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5746 sizeof(struct ieee80211_header
);
5748 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 5000;
5750 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5751 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
|
5752 WIPHY_FLAG_SUPPORTS_SCHED_SCAN
;
5754 /* make sure all our channels fit in the scanned_ch bitmask */
5755 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5756 ARRAY_SIZE(wl1271_channels_5ghz
) >
5757 WL1271_MAX_CHANNELS
);
5759 * clear channel flags from the previous usage
5760 * and restore max_power & max_antenna_gain values.
5762 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
5763 wl1271_band_2ghz
.channels
[i
].flags
= 0;
5764 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5765 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
5768 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
5769 wl1271_band_5ghz
.channels
[i
].flags
= 0;
5770 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5771 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
5775 * We keep local copies of the band structs because we need to
5776 * modify them on a per-device basis.
5778 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5779 sizeof(wl1271_band_2ghz
));
5780 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5781 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5782 sizeof(*wl
->ht_cap
));
5783 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5784 sizeof(wl1271_band_5ghz
));
5785 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5786 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5787 sizeof(*wl
->ht_cap
));
5789 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5790 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5791 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5792 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5795 * allow 4 queues per mac address we support +
5796 * 1 cab queue per mac + one global offchannel Tx queue
5798 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
5800 /* the last queue is the offchannel queue */
5801 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
5802 wl
->hw
->max_rates
= 1;
5804 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5806 /* the FW answers probe-requests in AP-mode */
5807 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5808 wl
->hw
->wiphy
->probe_resp_offload
=
5809 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5810 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5811 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5813 /* allowed interface combinations */
5814 wlcore_iface_combinations
[0].num_different_channels
= wl
->num_channels
;
5815 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5816 wl
->hw
->wiphy
->n_iface_combinations
=
5817 ARRAY_SIZE(wlcore_iface_combinations
);
5819 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5821 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5822 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5824 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5829 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
5832 struct ieee80211_hw
*hw
;
5837 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5839 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5841 wl1271_error("could not alloc ieee80211_hw");
5847 memset(wl
, 0, sizeof(*wl
));
5849 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5851 wl1271_error("could not alloc wl priv");
5853 goto err_priv_alloc
;
5856 INIT_LIST_HEAD(&wl
->wlvif_list
);
5860 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5861 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5862 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5864 skb_queue_head_init(&wl
->deferred_rx_queue
);
5865 skb_queue_head_init(&wl
->deferred_tx_queue
);
5867 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5868 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5869 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5870 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5871 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5872 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
5873 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5875 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5876 if (!wl
->freezable_wq
) {
5883 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5884 wl
->band
= IEEE80211_BAND_2GHZ
;
5885 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5887 wl
->sg_enabled
= true;
5888 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5889 wl
->recovery_count
= 0;
5892 wl
->ap_fw_ps_map
= 0;
5894 wl
->platform_quirks
= 0;
5895 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5896 wl
->active_sta_count
= 0;
5897 wl
->active_link_count
= 0;
5899 init_waitqueue_head(&wl
->fwlog_waitq
);
5901 /* The system link is always allocated */
5902 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5904 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5905 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5906 wl
->tx_frames
[i
] = NULL
;
5908 spin_lock_init(&wl
->wl_lock
);
5910 wl
->state
= WLCORE_STATE_OFF
;
5911 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5912 mutex_init(&wl
->mutex
);
5913 mutex_init(&wl
->flush_mutex
);
5914 init_completion(&wl
->nvs_loading_complete
);
5916 order
= get_order(aggr_buf_size
);
5917 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5918 if (!wl
->aggr_buf
) {
5922 wl
->aggr_buf_size
= aggr_buf_size
;
5924 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5925 if (!wl
->dummy_packet
) {
5930 /* Allocate one page for the FW log */
5931 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5934 goto err_dummy_packet
;
5937 wl
->mbox_size
= mbox_size
;
5938 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
5944 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
5945 if (!wl
->buffer_32
) {
5956 free_page((unsigned long)wl
->fwlog
);
5959 dev_kfree_skb(wl
->dummy_packet
);
5962 free_pages((unsigned long)wl
->aggr_buf
, order
);
5965 destroy_workqueue(wl
->freezable_wq
);
5968 wl1271_debugfs_exit(wl
);
5972 ieee80211_free_hw(hw
);
5976 return ERR_PTR(ret
);
5978 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5980 int wlcore_free_hw(struct wl1271
*wl
)
5982 /* Unblock any fwlog readers */
5983 mutex_lock(&wl
->mutex
);
5984 wl
->fwlog_size
= -1;
5985 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5986 mutex_unlock(&wl
->mutex
);
5988 wlcore_sysfs_free(wl
);
5990 kfree(wl
->buffer_32
);
5992 free_page((unsigned long)wl
->fwlog
);
5993 dev_kfree_skb(wl
->dummy_packet
);
5994 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
5996 wl1271_debugfs_exit(wl
);
6000 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6004 kfree(wl
->fw_status_1
);
6005 kfree(wl
->tx_res_if
);
6006 destroy_workqueue(wl
->freezable_wq
);
6009 ieee80211_free_hw(wl
->hw
);
6013 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
6016 static const struct wiphy_wowlan_support wlcore_wowlan_support
= {
6017 .flags
= WIPHY_WOWLAN_ANY
,
6018 .n_patterns
= WL1271_MAX_RX_FILTERS
,
6019 .pattern_min_len
= 1,
6020 .pattern_max_len
= WL1271_RX_FILTER_MAX_PATTERN_SIZE
,
6024 static irqreturn_t
wlcore_hardirq(int irq
, void *cookie
)
6026 return IRQ_WAKE_THREAD
;
6029 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
6031 struct wl1271
*wl
= context
;
6032 struct platform_device
*pdev
= wl
->pdev
;
6033 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6034 struct wl12xx_platform_data
*pdata
= pdev_data
->pdata
;
6035 unsigned long irqflags
;
6037 irq_handler_t hardirq_fn
= NULL
;
6040 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
6042 wl1271_error("Could not allocate nvs data");
6045 wl
->nvs_len
= fw
->size
;
6047 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
6053 ret
= wl
->ops
->setup(wl
);
6057 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
6059 /* adjust some runtime configuration parameters */
6060 wlcore_adjust_conf(wl
);
6062 wl
->irq
= platform_get_irq(pdev
, 0);
6063 wl
->platform_quirks
= pdata
->platform_quirks
;
6064 wl
->if_ops
= pdev_data
->if_ops
;
6066 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
) {
6067 irqflags
= IRQF_TRIGGER_RISING
;
6068 hardirq_fn
= wlcore_hardirq
;
6070 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
6073 ret
= request_threaded_irq(wl
->irq
, hardirq_fn
, wlcore_irq
,
6074 irqflags
, pdev
->name
, wl
);
6076 wl1271_error("request_irq() failed: %d", ret
);
6081 ret
= enable_irq_wake(wl
->irq
);
6083 wl
->irq_wake_enabled
= true;
6084 device_init_wakeup(wl
->dev
, 1);
6085 if (pdata
->pwr_in_suspend
)
6086 wl
->hw
->wiphy
->wowlan
= &wlcore_wowlan_support
;
6089 disable_irq(wl
->irq
);
6091 ret
= wl12xx_get_hw_info(wl
);
6093 wl1271_error("couldn't get hw info");
6097 ret
= wl
->ops
->identify_chip(wl
);
6101 ret
= wl1271_init_ieee80211(wl
);
6105 ret
= wl1271_register_hw(wl
);
6109 ret
= wlcore_sysfs_init(wl
);
6113 wl
->initialized
= true;
6117 wl1271_unregister_hw(wl
);
6120 free_irq(wl
->irq
, wl
);
6126 release_firmware(fw
);
6127 complete_all(&wl
->nvs_loading_complete
);
6130 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
6134 if (!wl
->ops
|| !wl
->ptable
)
6137 wl
->dev
= &pdev
->dev
;
6139 platform_set_drvdata(pdev
, wl
);
6141 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6142 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
6145 wl1271_error("request_firmware_nowait failed: %d", ret
);
6146 complete_all(&wl
->nvs_loading_complete
);
6151 EXPORT_SYMBOL_GPL(wlcore_probe
);
6153 int wlcore_remove(struct platform_device
*pdev
)
6155 struct wl1271
*wl
= platform_get_drvdata(pdev
);
6157 wait_for_completion(&wl
->nvs_loading_complete
);
6158 if (!wl
->initialized
)
6161 if (wl
->irq_wake_enabled
) {
6162 device_init_wakeup(wl
->dev
, 0);
6163 disable_irq_wake(wl
->irq
);
6165 wl1271_unregister_hw(wl
);
6166 free_irq(wl
->irq
, wl
);
6171 EXPORT_SYMBOL_GPL(wlcore_remove
);
6173 u32 wl12xx_debug_level
= DEBUG_NONE
;
6174 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
6175 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
6176 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
6178 module_param_named(fwlog
, fwlog_param
, charp
, 0);
6179 MODULE_PARM_DESC(fwlog
,
6180 "FW logger options: continuous, ondemand, dbgpins or disable");
6182 module_param(fwlog_mem_blocks
, int, S_IRUSR
| S_IWUSR
);
6183 MODULE_PARM_DESC(fwlog_mem_blocks
, "fwlog mem_blocks");
6185 module_param(bug_on_recovery
, int, S_IRUSR
| S_IWUSR
);
6186 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
6188 module_param(no_recovery
, int, S_IRUSR
| S_IWUSR
);
6189 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
6191 MODULE_LICENSE("GPL");
6192 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6193 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6194 MODULE_FIRMWARE(WL12XX_NVS_NAME
);