Commit | Line | Data |
---|---|---|
f5fc0f86 LC |
1 | /* |
2 | * This file is part of wl1271 | |
3 | * | |
4 | * Copyright (C) 2009 Nokia Corporation | |
5 | * | |
6 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License | |
10 | * version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but | |
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License | |
18 | * along with this program; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | |
20 | * 02110-1301 USA | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/kernel.h> | |
25 | #include <linux/module.h> | |
c6c8a65d | 26 | #include <linux/etherdevice.h> |
f5fc0f86 | 27 | |
c31be25a | 28 | #include "wlcore.h" |
0f4e3122 | 29 | #include "debug.h" |
00d20100 | 30 | #include "io.h" |
00d20100 SL |
31 | #include "ps.h" |
32 | #include "tx.h" | |
56d4f8f6 | 33 | #include "event.h" |
b3b4b4b8 | 34 | #include "hw_ops.h" |
f5fc0f86 | 35 | |
00782136 LC |
36 | /* |
37 | * TODO: this is here just for now, it must be removed when the data | |
38 | * operations are in place. | |
39 | */ | |
40 | #include "../wl12xx/reg.h" | |
41 | ||
536129c8 EP |
42 | static int wl1271_set_default_wep_key(struct wl1271 *wl, |
43 | struct wl12xx_vif *wlvif, u8 id) | |
7f179b46 AN |
44 | { |
45 | int ret; | |
536129c8 | 46 | bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); |
7f179b46 AN |
47 | |
48 | if (is_ap) | |
c690ec81 | 49 | ret = wl12xx_cmd_set_default_wep_key(wl, id, |
a8ab39a4 | 50 | wlvif->ap.bcast_hlid); |
7f179b46 | 51 | else |
154da67c | 52 | ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); |
7f179b46 AN |
53 | |
54 | if (ret < 0) | |
55 | return ret; | |
56 | ||
57 | wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); | |
58 | return 0; | |
59 | } | |
60 | ||
25eeb9e3 | 61 | static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) |
f5fc0f86 | 62 | { |
25eeb9e3 IY |
63 | int id; |
64 | ||
72b0624f AN |
65 | id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc); |
66 | if (id >= wl->num_tx_desc) | |
25eeb9e3 IY |
67 | return -EBUSY; |
68 | ||
69 | __set_bit(id, wl->tx_frames_map); | |
70 | wl->tx_frames[id] = skb; | |
71 | wl->tx_frames_cnt++; | |
72 | return id; | |
73 | } | |
f5fc0f86 | 74 | |
25eeb9e3 IY |
75 | static void wl1271_free_tx_id(struct wl1271 *wl, int id) |
76 | { | |
77 | if (__test_and_clear_bit(id, wl->tx_frames_map)) { | |
72b0624f | 78 | if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc)) |
ef2e3004 IY |
79 | clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); |
80 | ||
25eeb9e3 IY |
81 | wl->tx_frames[id] = NULL; |
82 | wl->tx_frames_cnt--; | |
83 | } | |
f5fc0f86 LC |
84 | } |
85 | ||
99a2775d AN |
86 | static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, |
87 | struct sk_buff *skb) | |
88 | { | |
89 | struct ieee80211_hdr *hdr; | |
90 | ||
91 | /* | |
92 | * add the station to the known list before transmitting the | |
93 | * authentication response. this way it won't get de-authed by FW | |
94 | * when transmitting too soon. | |
95 | */ | |
96 | hdr = (struct ieee80211_hdr *)(skb->data + | |
97 | sizeof(struct wl1271_tx_hw_descr)); | |
98 | if (ieee80211_is_auth(hdr->frame_control)) | |
99 | wl1271_acx_set_inconnection_sta(wl, hdr->addr1); | |
100 | } | |
101 | ||
c7ffb902 EP |
102 | static void wl1271_tx_regulate_link(struct wl1271 *wl, |
103 | struct wl12xx_vif *wlvif, | |
104 | u8 hlid) | |
b622d992 | 105 | { |
da03209e | 106 | bool fw_ps, single_sta; |
9b17f1b3 | 107 | u8 tx_pkts; |
b622d992 | 108 | |
c7ffb902 | 109 | if (WARN_ON(!test_bit(hlid, wlvif->links_map))) |
b622d992 AN |
110 | return; |
111 | ||
112 | fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); | |
9b17f1b3 | 113 | tx_pkts = wl->links[hlid].allocated_pkts; |
da03209e | 114 | single_sta = (wl->active_sta_count == 1); |
b622d992 AN |
115 | |
116 | /* | |
117 | * if in FW PS and there is enough data in FW we can put the link | |
118 | * into high-level PS and clean out its TX queues. | |
da03209e AN |
119 | * Make an exception if this is the only connected station. In this |
120 | * case FW-memory congestion is not a problem. | |
b622d992 | 121 | */ |
da03209e | 122 | if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) |
6e8cd331 | 123 | wl12xx_ps_link_start(wl, wlvif, hlid, true); |
b622d992 AN |
124 | } |
125 | ||
f8e0af6b | 126 | bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) |
f4df1bd5 EP |
127 | { |
128 | return wl->dummy_packet == skb; | |
129 | } | |
130 | ||
a8ab39a4 EP |
131 | u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
132 | struct sk_buff *skb) | |
a8c0ddb5 AN |
133 | { |
134 | struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); | |
135 | ||
136 | if (control->control.sta) { | |
137 | struct wl1271_station *wl_sta; | |
138 | ||
139 | wl_sta = (struct wl1271_station *) | |
140 | control->control.sta->drv_priv; | |
141 | return wl_sta->hlid; | |
142 | } else { | |
143 | struct ieee80211_hdr *hdr; | |
144 | ||
53d40d0b | 145 | if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) |
f4df1bd5 EP |
146 | return wl->system_hlid; |
147 | ||
a8c0ddb5 AN |
148 | hdr = (struct ieee80211_hdr *)skb->data; |
149 | if (ieee80211_is_mgmt(hdr->frame_control)) | |
a8ab39a4 | 150 | return wlvif->ap.global_hlid; |
a8c0ddb5 | 151 | else |
a8ab39a4 | 152 | return wlvif->ap.bcast_hlid; |
a8c0ddb5 AN |
153 | } |
154 | } | |
155 | ||
d6a3cc2e EP |
156 | u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
157 | struct sk_buff *skb) | |
f4df1bd5 | 158 | { |
df4c849f EP |
159 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
160 | ||
0f168014 | 161 | if (!wlvif || wl12xx_is_dummy_packet(wl, skb)) |
f4df1bd5 EP |
162 | return wl->system_hlid; |
163 | ||
536129c8 | 164 | if (wlvif->bss_type == BSS_TYPE_AP_BSS) |
a8ab39a4 | 165 | return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); |
f4df1bd5 | 166 | |
ba8447f6 | 167 | if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || |
eee514e3 | 168 | test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && |
df4c849f EP |
169 | !ieee80211_is_auth(hdr->frame_control) && |
170 | !ieee80211_is_assoc_req(hdr->frame_control)) | |
154da67c | 171 | return wlvif->sta.hlid; |
f4df1bd5 | 172 | else |
afaf8bdb | 173 | return wlvif->dev_hlid; |
f4df1bd5 EP |
174 | } |
175 | ||
b3b4b4b8 AN |
176 | unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl, |
177 | unsigned int packet_length) | |
0da13da7 | 178 | { |
f83985bb | 179 | if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) |
ce39defb | 180 | return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); |
f83985bb AN |
181 | else |
182 | return ALIGN(packet_length, WL1271_TX_ALIGN_TO); | |
0da13da7 | 183 | } |
b3b4b4b8 | 184 | EXPORT_SYMBOL(wlcore_calc_packet_alignment); |
0da13da7 | 185 | |
a32d0cdf | 186 | static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
536129c8 EP |
187 | struct sk_buff *skb, u32 extra, u32 buf_offset, |
188 | u8 hlid) | |
f5fc0f86 LC |
189 | { |
190 | struct wl1271_tx_hw_descr *desc; | |
191 | u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; | |
5c9417f1 | 192 | u32 total_blocks; |
742246f8 | 193 | int id, ret = -EBUSY, ac; |
3edab305 | 194 | u32 spare_blocks = wl->normal_tx_spare; |
a32d0cdf | 195 | bool is_dummy = false; |
f5fc0f86 | 196 | |
a19606b4 | 197 | if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) |
6c6e669e | 198 | return -EAGAIN; |
a19606b4 | 199 | |
f5fc0f86 | 200 | /* allocate free identifier for the packet */ |
25eeb9e3 | 201 | id = wl1271_alloc_tx_id(wl, skb); |
f5fc0f86 LC |
202 | if (id < 0) |
203 | return id; | |
204 | ||
3edab305 | 205 | if (unlikely(wl12xx_is_dummy_packet(wl, skb))) |
a32d0cdf | 206 | is_dummy = true; |
3edab305 AN |
207 | else if (wlvif->is_gem) |
208 | spare_blocks = wl->gem_tx_spare; | |
e9eb8cbe | 209 | |
b3b4b4b8 | 210 | total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks); |
48a61477 | 211 | |
f5fc0f86 LC |
212 | if (total_blocks <= wl->tx_blocks_available) { |
213 | desc = (struct wl1271_tx_hw_descr *)skb_push( | |
214 | skb, total_len - skb->len); | |
215 | ||
4a3b97ee AN |
216 | wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks, |
217 | spare_blocks); | |
ae77eccf | 218 | |
f5fc0f86 LC |
219 | desc->id = id; |
220 | ||
221 | wl->tx_blocks_available -= total_blocks; | |
7bb5d6ce | 222 | wl->tx_allocated_blocks += total_blocks; |
f5fc0f86 | 223 | |
55df5afb AN |
224 | /* If the FW was empty before, arm the Tx watchdog */ |
225 | if (wl->tx_allocated_blocks == total_blocks) | |
226 | wl12xx_rearm_tx_watchdog_locked(wl); | |
227 | ||
742246f8 AN |
228 | ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); |
229 | wl->tx_allocated_pkts[ac]++; | |
bf54e301 | 230 | |
0f168014 EP |
231 | if (!is_dummy && wlvif && |
232 | wlvif->bss_type == BSS_TYPE_AP_BSS && | |
c7ffb902 | 233 | test_bit(hlid, wlvif->ap.sta_hlid_map)) |
9b17f1b3 | 234 | wl->links[hlid].allocated_pkts++; |
09039f42 | 235 | |
f5fc0f86 LC |
236 | ret = 0; |
237 | ||
238 | wl1271_debug(DEBUG_TX, | |
239 | "tx_allocate: size: %d, blocks: %d, id: %d", | |
240 | total_len, total_blocks, id); | |
781608c4 | 241 | } else { |
25eeb9e3 | 242 | wl1271_free_tx_id(wl, id); |
781608c4 | 243 | } |
f5fc0f86 LC |
244 | |
245 | return ret; | |
246 | } | |
247 | ||
a32d0cdf | 248 | static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
536129c8 EP |
249 | struct sk_buff *skb, u32 extra, |
250 | struct ieee80211_tx_info *control, u8 hlid) | |
f5fc0f86 | 251 | { |
ac5e1e39 | 252 | struct timespec ts; |
f5fc0f86 | 253 | struct wl1271_tx_hw_descr *desc; |
6f266e91 | 254 | int ac, rate_idx; |
ac5e1e39 | 255 | s64 hosttime; |
cf00f379 | 256 | u16 tx_attr = 0; |
f4f57943 EP |
257 | __le16 frame_control; |
258 | struct ieee80211_hdr *hdr; | |
259 | u8 *frame_start; | |
a32d0cdf | 260 | bool is_dummy; |
f5fc0f86 LC |
261 | |
262 | desc = (struct wl1271_tx_hw_descr *) skb->data; | |
f4f57943 EP |
263 | frame_start = (u8 *)(desc + 1); |
264 | hdr = (struct ieee80211_hdr *)(frame_start + extra); | |
265 | frame_control = hdr->frame_control; | |
f5fc0f86 | 266 | |
1e2b7976 JO |
267 | /* relocate space for security header */ |
268 | if (extra) { | |
f4f57943 EP |
269 | int hdrlen = ieee80211_hdrlen(frame_control); |
270 | memmove(frame_start, hdr, hdrlen); | |
1e2b7976 JO |
271 | } |
272 | ||
f5fc0f86 | 273 | /* configure packet life time */ |
ac5e1e39 JO |
274 | getnstimeofday(&ts); |
275 | hosttime = (timespec_to_ns(&ts) >> 10); | |
276 | desc->start_time = cpu_to_le32(hosttime - wl->time_offset); | |
c6c8a65d | 277 | |
a32d0cdf | 278 | is_dummy = wl12xx_is_dummy_packet(wl, skb); |
0f168014 | 279 | if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS) |
c6c8a65d AN |
280 | desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); |
281 | else | |
282 | desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); | |
f5fc0f86 | 283 | |
db674d24 | 284 | /* queue */ |
c6999d83 | 285 | ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); |
db674d24 | 286 | desc->tid = skb->priority; |
c6c8a65d | 287 | |
a32d0cdf | 288 | if (is_dummy) { |
ae47c45f SL |
289 | /* |
290 | * FW expects the dummy packet to have an invalid session id - | |
291 | * any session id that is different than the one set in the join | |
292 | */ | |
98b86253 | 293 | tx_attr = (SESSION_COUNTER_INVALID << |
ae47c45f SL |
294 | TX_HW_ATTR_OFST_SESSION_COUNTER) & |
295 | TX_HW_ATTR_SESSION_COUNTER; | |
296 | ||
297 | tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; | |
0f168014 | 298 | } else if (wlvif) { |
ae47c45f | 299 | /* configure the tx attributes */ |
98b86253 EP |
300 | tx_attr = wlvif->session_counter << |
301 | TX_HW_ATTR_OFST_SESSION_COUNTER; | |
ae47c45f SL |
302 | } |
303 | ||
79b122dc | 304 | desc->hlid = hlid; |
0f168014 | 305 | if (is_dummy || !wlvif) |
a32d0cdf EP |
306 | rate_idx = 0; |
307 | else if (wlvif->bss_type != BSS_TYPE_AP_BSS) { | |
c6c8a65d AN |
308 | /* if the packets are destined for AP (have a STA entry) |
309 | send them with AP rate policies, otherwise use default | |
310 | basic rates */ | |
8a0f2ee3 EP |
311 | if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE) |
312 | rate_idx = wlvif->sta.p2p_rate_idx; | |
313 | else if (control->control.sta) | |
e5a359f8 | 314 | rate_idx = wlvif->sta.ap_rate_idx; |
c6c8a65d | 315 | else |
e5a359f8 | 316 | rate_idx = wlvif->sta.basic_rate_idx; |
c6c8a65d | 317 | } else { |
a8ab39a4 | 318 | if (hlid == wlvif->ap.global_hlid) |
e5a359f8 | 319 | rate_idx = wlvif->ap.mgmt_rate_idx; |
a8ab39a4 | 320 | else if (hlid == wlvif->ap.bcast_hlid) |
e5a359f8 | 321 | rate_idx = wlvif->ap.bcast_rate_idx; |
e51ae9be | 322 | else |
e5a359f8 | 323 | rate_idx = wlvif->ap.ucast_rate_idx[ac]; |
c6c8a65d AN |
324 | } |
325 | ||
326 | tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; | |
d0f63b20 | 327 | |
f4f57943 EP |
328 | /* for WEP shared auth - no fw encryption is needed */ |
329 | if (ieee80211_is_auth(frame_control) && | |
330 | ieee80211_has_protected(frame_control)) | |
331 | tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; | |
332 | ||
6f266e91 | 333 | desc->reserved = 0; |
d0f63b20 | 334 | desc->tx_attr = cpu_to_le16(tx_attr); |
6f266e91 AN |
335 | |
336 | wlcore_hw_set_tx_desc_data_len(wl, desc, skb); | |
f5fc0f86 LC |
337 | } |
338 | ||
339 | /* caller must hold wl->mutex */ | |
a32d0cdf EP |
340 | static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
341 | struct sk_buff *skb, u32 buf_offset) | |
f5fc0f86 LC |
342 | { |
343 | struct ieee80211_tx_info *info; | |
344 | u32 extra = 0; | |
345 | int ret = 0; | |
a19606b4 | 346 | u32 total_len; |
09039f42 | 347 | u8 hlid; |
536129c8 | 348 | bool is_dummy; |
f5fc0f86 LC |
349 | |
350 | if (!skb) | |
351 | return -EINVAL; | |
352 | ||
353 | info = IEEE80211_SKB_CB(skb); | |
354 | ||
536129c8 EP |
355 | /* TODO: handle dummy packets on multi-vifs */ |
356 | is_dummy = wl12xx_is_dummy_packet(wl, skb); | |
536129c8 | 357 | |
f5fc0f86 | 358 | if (info->control.hw_key && |
97359d12 | 359 | info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) |
5ec8a448 | 360 | extra = WL1271_EXTRA_SPACE_TKIP; |
f5fc0f86 LC |
361 | |
362 | if (info->control.hw_key) { | |
7f179b46 AN |
363 | bool is_wep; |
364 | u8 idx = info->control.hw_key->hw_key_idx; | |
365 | u32 cipher = info->control.hw_key->cipher; | |
366 | ||
367 | is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || | |
368 | (cipher == WLAN_CIPHER_SUITE_WEP104); | |
f5fc0f86 | 369 | |
f75c753f | 370 | if (unlikely(is_wep && wlvif->default_key != idx)) { |
536129c8 | 371 | ret = wl1271_set_default_wep_key(wl, wlvif, idx); |
f5fc0f86 LC |
372 | if (ret < 0) |
373 | return ret; | |
f75c753f | 374 | wlvif->default_key = idx; |
f5fc0f86 LC |
375 | } |
376 | } | |
d6a3cc2e | 377 | hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); |
79b122dc | 378 | if (hlid == WL12XX_INVALID_LINK_ID) { |
e0d62536 | 379 | wl1271_error("invalid hlid. dropping skb 0x%p", skb); |
79b122dc EP |
380 | return -EINVAL; |
381 | } | |
09039f42 | 382 | |
a32d0cdf | 383 | ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid); |
f5fc0f86 LC |
384 | if (ret < 0) |
385 | return ret; | |
386 | ||
a32d0cdf | 387 | wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid); |
fae2fd76 | 388 | |
0f168014 | 389 | if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) { |
99a2775d | 390 | wl1271_tx_ap_update_inconnection_sta(wl, skb); |
c7ffb902 | 391 | wl1271_tx_regulate_link(wl, wlvif, hlid); |
b622d992 | 392 | } |
99a2775d | 393 | |
a19606b4 | 394 | /* |
48a61477 SL |
395 | * The length of each packet is stored in terms of |
396 | * words. Thus, we must pad the skb data to make sure its | |
397 | * length is aligned. The number of padding bytes is computed | |
398 | * and set in wl1271_tx_fill_hdr. | |
399 | * In special cases, we want to align to a specific block size | |
400 | * (eg. for wl128x with SDIO we align to 256). | |
a19606b4 | 401 | */ |
b3b4b4b8 | 402 | total_len = wlcore_calc_packet_alignment(wl, skb->len); |
48a61477 | 403 | |
a19606b4 IY |
404 | memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); |
405 | memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); | |
f5fc0f86 | 406 | |
990f5de7 | 407 | /* Revert side effects in the dummy packet skb, so it can be reused */ |
536129c8 | 408 | if (is_dummy) |
990f5de7 IY |
409 | skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); |
410 | ||
a19606b4 | 411 | return total_len; |
f5fc0f86 LC |
412 | } |
413 | ||
af7fbb28 EP |
414 | u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, |
415 | enum ieee80211_band rate_band) | |
830fb67b JO |
416 | { |
417 | struct ieee80211_supported_band *band; | |
418 | u32 enabled_rates = 0; | |
419 | int bit; | |
420 | ||
af7fbb28 | 421 | band = wl->hw->wiphy->bands[rate_band]; |
830fb67b JO |
422 | for (bit = 0; bit < band->n_bitrates; bit++) { |
423 | if (rate_set & 0x1) | |
424 | enabled_rates |= band->bitrates[bit].hw_value; | |
425 | rate_set >>= 1; | |
426 | } | |
427 | ||
18357850 SL |
428 | /* MCS rates indication are on bits 16 - 23 */ |
429 | rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; | |
430 | ||
431 | for (bit = 0; bit < 8; bit++) { | |
432 | if (rate_set & 0x1) | |
433 | enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); | |
434 | rate_set >>= 1; | |
435 | } | |
18357850 | 436 | |
830fb67b JO |
437 | return enabled_rates; |
438 | } | |
439 | ||
a8c0ddb5 | 440 | void wl1271_handle_tx_low_watermark(struct wl1271 *wl) |
2fe33e8c IY |
441 | { |
442 | unsigned long flags; | |
708bb3cf | 443 | int i; |
2fe33e8c | 444 | |
708bb3cf AN |
445 | for (i = 0; i < NUM_TX_QUEUES; i++) { |
446 | if (test_bit(i, &wl->stopped_queues_map) && | |
f1a46384 | 447 | wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { |
708bb3cf AN |
448 | /* firmware buffer has space, restart queues */ |
449 | spin_lock_irqsave(&wl->wl_lock, flags); | |
450 | ieee80211_wake_queue(wl->hw, | |
451 | wl1271_tx_get_mac80211_queue(i)); | |
452 | clear_bit(i, &wl->stopped_queues_map); | |
453 | spin_unlock_irqrestore(&wl->wl_lock, flags); | |
454 | } | |
2fe33e8c IY |
455 | } |
456 | } | |
457 | ||
742246f8 AN |
458 | static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, |
459 | struct sk_buff_head *queues) | |
460 | { | |
461 | int i, q = -1, ac; | |
462 | u32 min_pkts = 0xffffffff; | |
463 | ||
464 | /* | |
465 | * Find a non-empty ac where: | |
466 | * 1. There are packets to transmit | |
467 | * 2. The FW has the least allocated blocks | |
468 | * | |
469 | * We prioritize the ACs according to VO>VI>BE>BK | |
470 | */ | |
471 | for (i = 0; i < NUM_TX_QUEUES; i++) { | |
472 | ac = wl1271_tx_get_queue(i); | |
473 | if (!skb_queue_empty(&queues[ac]) && | |
474 | (wl->tx_allocated_pkts[ac] < min_pkts)) { | |
475 | q = ac; | |
476 | min_pkts = wl->tx_allocated_pkts[q]; | |
477 | } | |
478 | } | |
479 | ||
480 | if (q == -1) | |
481 | return NULL; | |
482 | ||
483 | return &queues[q]; | |
484 | } | |
485 | ||
d6a3cc2e EP |
486 | static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, |
487 | struct wl1271_link *lnk) | |
6742f554 | 488 | { |
d6a3cc2e | 489 | struct sk_buff *skb; |
6742f554 | 490 | unsigned long flags; |
742246f8 | 491 | struct sk_buff_head *queue; |
6742f554 | 492 | |
d6a3cc2e | 493 | queue = wl1271_select_queue(wl, lnk->tx_queue); |
742246f8 | 494 | if (!queue) |
d6a3cc2e | 495 | return NULL; |
742246f8 AN |
496 | |
497 | skb = skb_dequeue(queue); | |
6742f554 | 498 | if (skb) { |
f1a46384 | 499 | int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); |
6742f554 | 500 | spin_lock_irqsave(&wl->wl_lock, flags); |
6246ca00 | 501 | WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); |
f1a46384 | 502 | wl->tx_queue_count[q]--; |
6742f554 JO |
503 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
504 | } | |
505 | ||
506 | return skb; | |
507 | } | |
508 | ||
d6a3cc2e EP |
509 | static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, |
510 | struct wl12xx_vif *wlvif) | |
a8c0ddb5 AN |
511 | { |
512 | struct sk_buff *skb = NULL; | |
a8c0ddb5 AN |
513 | int i, h, start_hlid; |
514 | ||
515 | /* start from the link after the last one */ | |
4438aca9 | 516 | start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS; |
a8c0ddb5 AN |
517 | |
518 | /* dequeue according to AC, round robin on each link */ | |
c7ffb902 EP |
519 | for (i = 0; i < WL12XX_MAX_LINKS; i++) { |
520 | h = (start_hlid + i) % WL12XX_MAX_LINKS; | |
a8c0ddb5 | 521 | |
742246f8 | 522 | /* only consider connected stations */ |
c7ffb902 | 523 | if (!test_bit(h, wlvif->links_map)) |
742246f8 AN |
524 | continue; |
525 | ||
d6a3cc2e EP |
526 | skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]); |
527 | if (!skb) | |
742246f8 AN |
528 | continue; |
529 | ||
d6a3cc2e EP |
530 | wlvif->last_tx_hlid = h; |
531 | break; | |
a8c0ddb5 AN |
532 | } |
533 | ||
d6a3cc2e | 534 | if (!skb) |
4438aca9 | 535 | wlvif->last_tx_hlid = 0; |
a8c0ddb5 AN |
536 | |
537 | return skb; | |
538 | } | |
539 | ||
a32d0cdf | 540 | static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) |
a8c0ddb5 | 541 | { |
990f5de7 | 542 | unsigned long flags; |
e4120df9 | 543 | struct wl12xx_vif *wlvif = wl->last_wlvif; |
990f5de7 IY |
544 | struct sk_buff *skb = NULL; |
545 | ||
49c9cd26 | 546 | /* continue from last wlvif (round robin) */ |
e4120df9 EP |
547 | if (wlvif) { |
548 | wl12xx_for_each_wlvif_continue(wl, wlvif) { | |
549 | skb = wl12xx_vif_skb_dequeue(wl, wlvif); | |
550 | if (skb) { | |
551 | wl->last_wlvif = wlvif; | |
552 | break; | |
553 | } | |
554 | } | |
555 | } | |
556 | ||
49c9cd26 AN |
557 | /* dequeue from the system HLID before the restarting wlvif list */ |
558 | if (!skb) | |
559 | skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); | |
560 | ||
561 | /* do a new pass over the wlvif list */ | |
e4120df9 EP |
562 | if (!skb) { |
563 | wl12xx_for_each_wlvif(wl, wlvif) { | |
564 | skb = wl12xx_vif_skb_dequeue(wl, wlvif); | |
565 | if (skb) { | |
566 | wl->last_wlvif = wlvif; | |
567 | break; | |
568 | } | |
49c9cd26 AN |
569 | |
570 | /* | |
571 | * No need to continue after last_wlvif. The previous | |
572 | * pass should have found it. | |
573 | */ | |
574 | if (wlvif == wl->last_wlvif) | |
575 | break; | |
e4120df9 | 576 | } |
a32d0cdf EP |
577 | } |
578 | ||
990f5de7 IY |
579 | if (!skb && |
580 | test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { | |
f1a46384 AN |
581 | int q; |
582 | ||
990f5de7 | 583 | skb = wl->dummy_packet; |
f1a46384 | 584 | q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); |
990f5de7 | 585 | spin_lock_irqsave(&wl->wl_lock, flags); |
6246ca00 | 586 | WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); |
f1a46384 | 587 | wl->tx_queue_count[q]--; |
990f5de7 IY |
588 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
589 | } | |
590 | ||
591 | return skb; | |
a8c0ddb5 AN |
592 | } |
593 | ||
d6a3cc2e | 594 | static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, |
536129c8 | 595 | struct sk_buff *skb) |
6742f554 JO |
596 | { |
597 | unsigned long flags; | |
598 | int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | |
599 | ||
990f5de7 IY |
600 | if (wl12xx_is_dummy_packet(wl, skb)) { |
601 | set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); | |
d6a3cc2e EP |
602 | } else { |
603 | u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); | |
a8c0ddb5 AN |
604 | skb_queue_head(&wl->links[hlid].tx_queue[q], skb); |
605 | ||
606 | /* make sure we dequeue the same packet next time */ | |
4438aca9 | 607 | wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) % |
d6a3cc2e | 608 | WL12XX_MAX_LINKS; |
a8c0ddb5 AN |
609 | } |
610 | ||
6742f554 | 611 | spin_lock_irqsave(&wl->wl_lock, flags); |
f1a46384 | 612 | wl->tx_queue_count[q]++; |
6742f554 JO |
613 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
614 | } | |
615 | ||
77ddaa10 EP |
616 | static bool wl1271_tx_is_data_present(struct sk_buff *skb) |
617 | { | |
618 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); | |
619 | ||
620 | return ieee80211_is_data_present(hdr->frame_control); | |
621 | } | |
622 | ||
9eb599e9 EP |
623 | void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) |
624 | { | |
625 | struct wl12xx_vif *wlvif; | |
626 | u32 timeout; | |
627 | u8 hlid; | |
628 | ||
629 | if (!wl->conf.rx_streaming.interval) | |
630 | return; | |
631 | ||
632 | if (!wl->conf.rx_streaming.always && | |
633 | !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) | |
634 | return; | |
635 | ||
636 | timeout = wl->conf.rx_streaming.duration; | |
637 | wl12xx_for_each_wlvif_sta(wl, wlvif) { | |
638 | bool found = false; | |
639 | for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) { | |
640 | if (test_bit(hlid, wlvif->links_map)) { | |
641 | found = true; | |
642 | break; | |
643 | } | |
644 | } | |
645 | ||
646 | if (!found) | |
647 | continue; | |
648 | ||
649 | /* enable rx streaming */ | |
0744bdb6 | 650 | if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) |
9eb599e9 EP |
651 | ieee80211_queue_work(wl->hw, |
652 | &wlvif->rx_streaming_enable_work); | |
653 | ||
654 | mod_timer(&wlvif->rx_streaming_timer, | |
655 | jiffies + msecs_to_jiffies(timeout)); | |
656 | } | |
657 | } | |
658 | ||
a32d0cdf | 659 | void wl1271_tx_work_locked(struct wl1271 *wl) |
f5fc0f86 | 660 | { |
a32d0cdf | 661 | struct wl12xx_vif *wlvif; |
f5fc0f86 | 662 | struct sk_buff *skb; |
9eb599e9 | 663 | struct wl1271_tx_hw_descr *desc; |
6c6e669e IY |
664 | u32 buf_offset = 0; |
665 | bool sent_packets = false; | |
9eb599e9 | 666 | unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; |
f5fc0f86 LC |
667 | int ret; |
668 | ||
f5fc0f86 | 669 | if (unlikely(wl->state == WL1271_STATE_OFF)) |
c1b193eb | 670 | return; |
f5fc0f86 | 671 | |
a32d0cdf | 672 | while ((skb = wl1271_skb_dequeue(wl))) { |
0f168014 | 673 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
9eb599e9 EP |
674 | bool has_data = false; |
675 | ||
a32d0cdf | 676 | wlvif = NULL; |
0f168014 EP |
677 | if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) |
678 | wlvif = wl12xx_vif_to_data(info->control.vif); | |
a32d0cdf | 679 | |
9eb599e9 | 680 | has_data = wlvif && wl1271_tx_is_data_present(skb); |
a32d0cdf | 681 | ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); |
6c6e669e | 682 | if (ret == -EAGAIN) { |
a19606b4 | 683 | /* |
6c6e669e IY |
684 | * Aggregation buffer is full. |
685 | * Flush buffer and try again. | |
686 | */ | |
d6a3cc2e | 687 | wl1271_skb_queue_head(wl, wlvif, skb); |
00782136 LC |
688 | wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, |
689 | buf_offset, true); | |
6c6e669e IY |
690 | sent_packets = true; |
691 | buf_offset = 0; | |
692 | continue; | |
693 | } else if (ret == -EBUSY) { | |
694 | /* | |
695 | * Firmware buffer is full. | |
a19606b4 IY |
696 | * Queue back last skb, and stop aggregating. |
697 | */ | |
d6a3cc2e | 698 | wl1271_skb_queue_head(wl, wlvif, skb); |
a522550a IY |
699 | /* No work left, avoid scheduling redundant tx work */ |
700 | set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); | |
ffb591cd | 701 | goto out_ack; |
f5fc0f86 | 702 | } else if (ret < 0) { |
5de8eef4 EP |
703 | if (wl12xx_is_dummy_packet(wl, skb)) |
704 | /* | |
705 | * fw still expects dummy packet, | |
706 | * so re-enqueue it | |
707 | */ | |
708 | wl1271_skb_queue_head(wl, wlvif, skb); | |
709 | else | |
710 | ieee80211_free_txskb(wl->hw, skb); | |
ffb591cd | 711 | goto out_ack; |
f5fc0f86 | 712 | } |
a19606b4 IY |
713 | buf_offset += ret; |
714 | wl->tx_packets_count++; | |
9eb599e9 EP |
715 | if (has_data) { |
716 | desc = (struct wl1271_tx_hw_descr *) skb->data; | |
717 | __set_bit(desc->hlid, active_hlids); | |
718 | } | |
f5fc0f86 LC |
719 | } |
720 | ||
ffb591cd | 721 | out_ack: |
a19606b4 | 722 | if (buf_offset) { |
00782136 LC |
723 | wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf, |
724 | buf_offset, true); | |
6c6e669e IY |
725 | sent_packets = true; |
726 | } | |
727 | if (sent_packets) { | |
606ea9fa IY |
728 | /* |
729 | * Interrupt the firmware with the new packets. This is only | |
730 | * required for older hardware revisions | |
731 | */ | |
6f7dd16c | 732 | if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) |
00782136 | 733 | wl1271_write32(wl, WL12XX_HOST_WR_ACCESS, |
606ea9fa IY |
734 | wl->tx_packets_count); |
735 | ||
a8c0ddb5 | 736 | wl1271_handle_tx_low_watermark(wl); |
a19606b4 | 737 | } |
9eb599e9 | 738 | wl12xx_rearm_rx_streaming(wl, active_hlids); |
a522550a | 739 | } |
f5fc0f86 | 740 | |
a522550a IY |
741 | void wl1271_tx_work(struct work_struct *work) |
742 | { | |
743 | struct wl1271 *wl = container_of(work, struct wl1271, tx_work); | |
c1b193eb | 744 | int ret; |
a522550a IY |
745 | |
746 | mutex_lock(&wl->mutex); | |
c1b193eb EP |
747 | ret = wl1271_ps_elp_wakeup(wl); |
748 | if (ret < 0) | |
749 | goto out; | |
750 | ||
a32d0cdf | 751 | wl1271_tx_work_locked(wl); |
c1b193eb | 752 | |
c75bbcdb | 753 | wl1271_ps_elp_sleep(wl); |
c1b193eb | 754 | out: |
f5fc0f86 LC |
755 | mutex_unlock(&wl->mutex); |
756 | } | |
757 | ||
d2e2d769 PF |
758 | static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) |
759 | { | |
defe02c7 PF |
760 | u8 flags = 0; |
761 | ||
43a8bc5a AN |
762 | /* |
763 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
764 | * only it uses Tx-completion. | |
765 | */ | |
766 | if (rate_class_index <= 8) | |
defe02c7 | 767 | flags |= IEEE80211_TX_RC_MCS; |
43a8bc5a AN |
768 | |
769 | /* | |
770 | * TODO: use wl12xx constants when this code is moved to wl12xx, as | |
771 | * only it uses Tx-completion. | |
772 | */ | |
773 | if (rate_class_index == 0) | |
defe02c7 | 774 | flags |= IEEE80211_TX_RC_SHORT_GI; |
43a8bc5a | 775 | |
defe02c7 | 776 | return flags; |
d2e2d769 PF |
777 | } |
778 | ||
f5fc0f86 LC |
/*
 * Process a single TX result descriptor reported by the firmware:
 * fill in the mac80211 status info for the frame, update the driver's
 * security sequence counter, strip the driver-private headers and hand
 * the skb back to the stack via the deferred queue.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets are driver-internal; just release the descriptor id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}
	/* other failure statuses keep rate = -1 / retries = 0 defaults */

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		/* shift the 802.11 header back over the extra TKIP space */
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
870 | ||
/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	/*
	 * number of new results since the last ack; both counters are
	 * free-running u32s, so unsigned subtraction handles wrap-around
	 */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* ring index into the fixed-size result queue */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}
EXPORT_SYMBOL(wl1271_tx_complete);
f5fc0f86 | 908 | |
a8c0ddb5 AN |
909 | void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) |
910 | { | |
911 | struct sk_buff *skb; | |
f1a46384 | 912 | int i; |
a8c0ddb5 | 913 | unsigned long flags; |
1d36cd89 | 914 | struct ieee80211_tx_info *info; |
f1a46384 | 915 | int total[NUM_TX_QUEUES]; |
a8c0ddb5 AN |
916 | |
917 | for (i = 0; i < NUM_TX_QUEUES; i++) { | |
f1a46384 | 918 | total[i] = 0; |
a8c0ddb5 AN |
919 | while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { |
920 | wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); | |
79ebec76 AN |
921 | |
922 | if (!wl12xx_is_dummy_packet(wl, skb)) { | |
923 | info = IEEE80211_SKB_CB(skb); | |
924 | info->status.rates[0].idx = -1; | |
925 | info->status.rates[0].count = 0; | |
926 | ieee80211_tx_status_ni(wl->hw, skb); | |
927 | } | |
928 | ||
f1a46384 | 929 | total[i]++; |
a8c0ddb5 AN |
930 | } |
931 | } | |
932 | ||
933 | spin_lock_irqsave(&wl->wl_lock, flags); | |
f1a46384 AN |
934 | for (i = 0; i < NUM_TX_QUEUES; i++) |
935 | wl->tx_queue_count[i] -= total[i]; | |
a8c0ddb5 AN |
936 | spin_unlock_irqrestore(&wl->wl_lock, flags); |
937 | ||
938 | wl1271_handle_tx_low_watermark(wl); | |
939 | } | |
940 | ||
7dece1c8 | 941 | /* caller must hold wl->mutex and TX must be stopped */ |
d6a3cc2e | 942 | void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) |
f5fc0f86 LC |
943 | { |
944 | int i; | |
f5fc0f86 LC |
945 | |
946 | /* TX failure */ | |
d6a3cc2e EP |
947 | for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { |
948 | if (wlvif->bss_type == BSS_TYPE_AP_BSS) | |
c7ffb902 | 949 | wl1271_free_sta(wl, wlvif, i); |
d6a3cc2e EP |
950 | else |
951 | wlvif->sta.ba_rx_bitmap = 0; | |
f1acea9a | 952 | |
d6a3cc2e EP |
953 | wl->links[i].allocated_pkts = 0; |
954 | wl->links[i].prev_freed_pkts = 0; | |
f5fc0f86 | 955 | } |
d6a3cc2e EP |
956 | wlvif->last_tx_hlid = 0; |
957 | ||
958 | } | |
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	/* release every in-flight frame still held in a TX descriptor */
	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		/* dummy packets are driver-internal; never passed to mac80211 */
		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			/* same TKIP extra-space stripping as the normal
			 * completion path (wl1271_tx_complete_packet) */
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			/* report the frame as failed (no rate info) */
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
1016 | ||
1017 | #define WL1271_TX_FLUSH_TIMEOUT 500000 | |
1018 | ||
1019 | /* caller must *NOT* hold wl->mutex */ | |
1020 | void wl1271_tx_flush(struct wl1271 *wl) | |
1021 | { | |
1022 | unsigned long timeout; | |
18aa755b | 1023 | int i; |
781608c4 JO |
1024 | timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); |
1025 | ||
1026 | while (!time_after(jiffies, timeout)) { | |
1027 | mutex_lock(&wl->mutex); | |
a8c0ddb5 | 1028 | wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", |
f1a46384 AN |
1029 | wl->tx_frames_cnt, |
1030 | wl1271_tx_total_queue_count(wl)); | |
1031 | if ((wl->tx_frames_cnt == 0) && | |
1032 | (wl1271_tx_total_queue_count(wl) == 0)) { | |
781608c4 JO |
1033 | mutex_unlock(&wl->mutex); |
1034 | return; | |
1035 | } | |
1036 | mutex_unlock(&wl->mutex); | |
1037 | msleep(1); | |
1038 | } | |
1039 | ||
1040 | wl1271_warning("Unable to flush all TX buffers, timed out."); | |
18aa755b AN |
1041 | |
1042 | /* forcibly flush all Tx buffers on our queues */ | |
1043 | mutex_lock(&wl->mutex); | |
1044 | for (i = 0; i < WL12XX_MAX_LINKS; i++) | |
1045 | wl1271_tx_reset_link_queues(wl, i); | |
1046 | mutex_unlock(&wl->mutex); | |
f5fc0f86 | 1047 | } |
e0fe371b | 1048 | |
af7fbb28 | 1049 | u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) |
e0fe371b | 1050 | { |
af7fbb28 EP |
1051 | if (WARN_ON(!rate_set)) |
1052 | return 0; | |
e0fe371b | 1053 | |
af7fbb28 | 1054 | return BIT(__ffs(rate_set)); |
e0fe371b | 1055 | } |