Commit | Line | Data |
---|---|---|
02525a79 | 1 | /* |
382afc3d | 2 | * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. |
02525a79 VK |
3 | * |
4 | * Permission to use, copy, modify, and/or distribute this software for any | |
5 | * purpose with or without fee is hereby granted, provided that the above | |
6 | * copyright notice and this permission notice appear in all copies. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
15 | */ | |
16 | ||
b4490f42 VK |
17 | #include "wil6210.h" |
18 | #include "txrx.h" | |
19 | ||
20 | #define SEQ_MODULO 0x1000 | |
21 | #define SEQ_MASK 0xfff | |
22 | ||
/* seq_less - true if @sq1 precedes @sq2 in the modulo-4096 sequence
 * space (802.11 sequence numbers are 12 bit, see SEQ_MODULO/SEQ_MASK).
 * "Precedes" means the forward distance from sq2 to sq1 is more than
 * half the sequence space.
 */
static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}
27 | ||
/* seq_inc - next sequence number, wrapping at SEQ_MODULO (0x1000) */
static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}
32 | ||
/* seq_sub - forward distance from @sq2 to @sq1 in modulo-4096 space */
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}
37 | ||
/* reorder_index - slot in the reorder ring buffer for sequence @seq,
 * counted from the block-ack starting sequence number (r->ssn) and
 * wrapped at the window size
 */
static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}
42 | ||
43 | static void wil_release_reorder_frame(struct wil6210_priv *wil, | |
44 | struct wil_tid_ampdu_rx *r, | |
45 | int index) | |
46 | { | |
47 | struct net_device *ndev = wil_to_ndev(wil); | |
48 | struct sk_buff *skb = r->reorder_buf[index]; | |
49 | ||
50 | if (!skb) | |
51 | goto no_frame; | |
52 | ||
53 | /* release the frame from the reorder ring buffer */ | |
54 | r->stored_mpdu_num--; | |
55 | r->reorder_buf[index] = NULL; | |
56 | wil_netif_rx_any(skb, ndev); | |
57 | ||
58 | no_frame: | |
59 | r->head_seq_num = seq_inc(r->head_seq_num); | |
60 | } | |
61 | ||
62 | static void wil_release_reorder_frames(struct wil6210_priv *wil, | |
63 | struct wil_tid_ampdu_rx *r, | |
64 | u16 hseq) | |
65 | { | |
66 | int index; | |
67 | ||
cf42c4e5 VK |
68 | /* note: this function is never called with |
69 | * hseq preceding r->head_seq_num, i.e it is always true | |
70 | * !seq_less(hseq, r->head_seq_num) | |
71 | * and thus on loop exit it should be | |
72 | * r->head_seq_num == hseq | |
73 | */ | |
74 | while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) { | |
b4490f42 VK |
75 | index = reorder_index(r, r->head_seq_num); |
76 | wil_release_reorder_frame(wil, r, index); | |
77 | } | |
cf42c4e5 | 78 | r->head_seq_num = hseq; |
b4490f42 VK |
79 | } |
80 | ||
81 | static void wil_reorder_release(struct wil6210_priv *wil, | |
82 | struct wil_tid_ampdu_rx *r) | |
83 | { | |
84 | int index = reorder_index(r, r->head_seq_num); | |
85 | ||
86 | while (r->reorder_buf[index]) { | |
87 | wil_release_reorder_frame(wil, r, index); | |
88 | index = reorder_index(r, r->head_seq_num); | |
89 | } | |
90 | } | |
91 | ||
/* Reorder one received frame per its block-ack session: in-order
 * frames (and frames without a session) go straight to the stack,
 * out-of-order frames are parked in the per-TID reorder ring until
 * the gap is filled or the window moves past them.
 * called in NAPI context
 */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int tid = wil_rxdesc_tid(d);
	int cid = wil_rxdesc_cid(d);
	int mid = wil_rxdesc_mid(d);
	u16 seq = wil_rxdesc_seq(d);
	int mcast = wil_rxdesc_mcast(d);
	struct wil_sta_info *sta = &wil->sta[cid];
	struct wil_tid_ampdu_rx *r;
	u16 hseq;
	int index;

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	/* multicast frames bypass block-ack reordering entirely */
	if (unlikely(mcast)) {
		wil_netif_rx_any(skb, ndev);
		return;
	}

	spin_lock(&sta->tid_rx_lock);

	r = sta->tid_rx[tid];
	if (!r) {
		/* no block-ack session for this TID - pass through */
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	r->total++;
	hseq = r->head_seq_num;

	/* Due to the race between WMI events, where BACK establishment
	 * reported, and data Rx, few packets may be pass up before reorder
	 * buffer get allocated. Catch up by pretending SSN is what we
	 * see in the 1-st Rx packet
	 *
	 * Another scenario, Rx get delayed and we got packet from before
	 * BACK. Pass it to the stack and wait.
	 */
	if (r->first_time) {
		r->first_time = false;
		if (seq != r->head_seq_num) {
			if (seq_less(seq, r->head_seq_num)) {
				/* keep first_time set so a later frame can
				 * still re-anchor the window
				 */
				wil_err(wil,
					"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
					seq, r->head_seq_num);
				r->first_time = true;
				wil_netif_rx_any(skb, ndev);
				goto out;
			}
			wil_err(wil,
				"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
				seq, r->head_seq_num);
			r->head_seq_num = seq;
			r->ssn = seq;
		}
	}

	/* frame with out of date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		r->drop_old++;
		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
			     seq, r->head_seq_num);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(wil, r, hseq);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		r->drop_dup++;
		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->reorder_time[index] = jiffies;
	r->stored_mpdu_num++;
	wil_reorder_release(wil, r);

out:
	spin_unlock(&sta->tid_rx_lock);
}
207 | ||
208 | struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, | |
209 | int size, u16 ssn) | |
210 | { | |
211 | struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL); | |
8fe59627 | 212 | |
b4490f42 VK |
213 | if (!r) |
214 | return NULL; | |
215 | ||
216 | r->reorder_buf = | |
217 | kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL); | |
218 | r->reorder_time = | |
219 | kcalloc(size, sizeof(unsigned long), GFP_KERNEL); | |
220 | if (!r->reorder_buf || !r->reorder_time) { | |
221 | kfree(r->reorder_buf); | |
222 | kfree(r->reorder_time); | |
223 | kfree(r); | |
224 | return NULL; | |
225 | } | |
226 | ||
b4490f42 VK |
227 | r->ssn = ssn; |
228 | r->head_seq_num = ssn; | |
229 | r->buf_size = size; | |
230 | r->stored_mpdu_num = 0; | |
c888cdd4 | 231 | r->first_time = true; |
b4490f42 VK |
232 | return r; |
233 | } | |
234 | ||
/* Release a reorder context. First flush every still-buffered frame to
 * the stack: head_seq_num + buf_size is one past the far edge of the
 * window, so releasing up to it empties the whole ring. Then free the
 * buffers and the context itself. Safe to call with @r == NULL.
 */
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
{
	if (!r)
		return;
	wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
	kfree(r->reorder_buf);
	kfree(r->reorder_time);
	kfree(r);
}
3277213f VK |
245 | |
246 | /* ADDBA processing */ | |
247 | static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) | |
248 | { | |
249 | u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE / | |
250 | (mtu_max + WIL_MAX_MPDU_OVERHEAD)); | |
251 | ||
252 | if (!req_agg_wsize) | |
253 | return max_agg_size; | |
254 | ||
255 | return min(max_agg_size, req_agg_wsize); | |
256 | } | |
257 | ||
258 | /* Block Ack - Rx side (recipient */ | |
259 | int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid, | |
260 | u8 dialog_token, __le16 ba_param_set, | |
261 | __le16 ba_timeout, __le16 ba_seq_ctrl) | |
262 | { | |
263 | struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL); | |
264 | ||
265 | if (!req) | |
266 | return -ENOMEM; | |
267 | ||
268 | req->cidxtid = cidxtid; | |
269 | req->dialog_token = dialog_token; | |
270 | req->ba_param_set = le16_to_cpu(ba_param_set); | |
271 | req->ba_timeout = le16_to_cpu(ba_timeout); | |
272 | req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl); | |
273 | ||
274 | mutex_lock(&wil->back_rx_mutex); | |
275 | list_add_tail(&req->list, &wil->back_rx_pending); | |
276 | mutex_unlock(&wil->back_rx_mutex); | |
277 | ||
278 | queue_work(wil->wq_service, &wil->back_rx_worker); | |
279 | ||
280 | return 0; | |
281 | } | |
282 | ||
/* Process one queued ADDBA request: validate it, send the ADDBA
 * response over WMI and, on success, (re)create the per-TID reorder
 * context. Runs from the service workqueue and may sleep.
 */
static void wil_back_rx_handle(struct wil6210_priv *wil,
			       struct wil_back_rx *req)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
	struct wil_sta_info *sta;
	u8 cid, tid;
	u16 agg_wsize = 0;
	/* bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
	bool agg_amsdu = !!(req->ba_param_set & BIT(0));
	int ba_policy = req->ba_param_set & BIT(1);
	u16 agg_timeout = req->ba_timeout;
	u16 status = WLAN_STATUS_SUCCESS;
	/* sequence control: low 4 bits fragment number, rest is SSN */
	u16 ssn = req->ba_seq_ctrl >> 4;
	struct wil_tid_ampdu_rx *r;
	int rc;

	might_sleep();
	parse_cidxtid(req->cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		return;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		return;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		    cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);

	/* apply policies */
	if (ba_policy) {
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	}
	if (status == WLAN_STATUS_SUCCESS)
		agg_wsize = wil_agg_size(wil, req_agg_wsize);

	rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
			       agg_amsdu, agg_wsize, agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS))
		return;

	/* apply */
	/* allocate outside the lock (GFP_KERNEL may sleep); swap the
	 * context under tid_rx_lock so the NAPI Rx path never sees a
	 * half-built one
	 */
	r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
	spin_lock_bh(&sta->tid_rx_lock);
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = r;
	spin_unlock_bh(&sta->tid_rx_lock);
}
344 | ||
345 | void wil_back_rx_flush(struct wil6210_priv *wil) | |
346 | { | |
347 | struct wil_back_rx *evt, *t; | |
348 | ||
349 | wil_dbg_misc(wil, "%s()\n", __func__); | |
350 | ||
351 | mutex_lock(&wil->back_rx_mutex); | |
352 | ||
353 | list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) { | |
354 | list_del(&evt->list); | |
355 | kfree(evt); | |
356 | } | |
357 | ||
358 | mutex_unlock(&wil->back_rx_mutex); | |
359 | } | |
360 | ||
361 | /* Retrieve next ADDBA request from the pending list */ | |
362 | static struct list_head *next_back_rx(struct wil6210_priv *wil) | |
363 | { | |
364 | struct list_head *ret = NULL; | |
365 | ||
366 | mutex_lock(&wil->back_rx_mutex); | |
367 | ||
368 | if (!list_empty(&wil->back_rx_pending)) { | |
369 | ret = wil->back_rx_pending.next; | |
370 | list_del(ret); | |
371 | } | |
372 | ||
373 | mutex_unlock(&wil->back_rx_mutex); | |
374 | ||
375 | return ret; | |
376 | } | |
377 | ||
378 | void wil_back_rx_worker(struct work_struct *work) | |
379 | { | |
380 | struct wil6210_priv *wil = container_of(work, struct wil6210_priv, | |
381 | back_rx_worker); | |
382 | struct wil_back_rx *evt; | |
383 | struct list_head *lh; | |
384 | ||
385 | while ((lh = next_back_rx(wil)) != NULL) { | |
386 | evt = list_entry(lh, struct wil_back_rx, list); | |
387 | ||
388 | wil_back_rx_handle(wil, evt); | |
389 | kfree(evt); | |
390 | } | |
391 | } | |
3a124ed6 VK |
392 | |
/* BACK - Tx (originator) side */

/* Process one queued ADDBA (Tx) request: initiate block-ack
 * establishment for the given Tx vring over WMI, unless establishment
 * is already in progress or the session already exists.
 * NOTE(review): addba_in_progress/agg_wsize are read and written here
 * without a lock - presumably serialized by the single work item and
 * the WMI event path; confirm against the ADDBA-response handler.
 */
static void wil_back_tx_handle(struct wil6210_priv *wil,
			       struct wil_back_tx *req)
{
	struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
	int rc;

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
			     req->ringid);
		return;
	}
	if (txdata->agg_wsize) {
		wil_dbg_misc(wil,
			     "ADDBA for vring[%d] already established wsize %d\n",
			     req->ringid, txdata->agg_wsize);
		return;
	}
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
	if (rc) /* WMI send failed - clear the flag to allow a retry */
		txdata->addba_in_progress = false;
}
416 | ||
417 | static struct list_head *next_back_tx(struct wil6210_priv *wil) | |
418 | { | |
419 | struct list_head *ret = NULL; | |
420 | ||
421 | mutex_lock(&wil->back_tx_mutex); | |
422 | ||
423 | if (!list_empty(&wil->back_tx_pending)) { | |
424 | ret = wil->back_tx_pending.next; | |
425 | list_del(ret); | |
426 | } | |
427 | ||
428 | mutex_unlock(&wil->back_tx_mutex); | |
429 | ||
430 | return ret; | |
431 | } | |
432 | ||
433 | void wil_back_tx_worker(struct work_struct *work) | |
434 | { | |
435 | struct wil6210_priv *wil = container_of(work, struct wil6210_priv, | |
436 | back_tx_worker); | |
437 | struct wil_back_tx *evt; | |
438 | struct list_head *lh; | |
439 | ||
440 | while ((lh = next_back_tx(wil)) != NULL) { | |
441 | evt = list_entry(lh, struct wil_back_tx, list); | |
442 | ||
443 | wil_back_tx_handle(wil, evt); | |
444 | kfree(evt); | |
445 | } | |
446 | } | |
447 | ||
448 | void wil_back_tx_flush(struct wil6210_priv *wil) | |
449 | { | |
450 | struct wil_back_tx *evt, *t; | |
451 | ||
452 | wil_dbg_misc(wil, "%s()\n", __func__); | |
453 | ||
454 | mutex_lock(&wil->back_tx_mutex); | |
455 | ||
456 | list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) { | |
457 | list_del(&evt->list); | |
458 | kfree(evt); | |
459 | } | |
460 | ||
461 | mutex_unlock(&wil->back_tx_mutex); | |
462 | } | |
463 | ||
3a3def8d | 464 | int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize) |
3a124ed6 VK |
465 | { |
466 | struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL); | |
467 | ||
468 | if (!req) | |
469 | return -ENOMEM; | |
470 | ||
471 | req->ringid = ringid; | |
3a3def8d | 472 | req->agg_wsize = wil_agg_size(wil, wsize); |
3a124ed6 VK |
473 | req->agg_timeout = 0; |
474 | ||
475 | mutex_lock(&wil->back_tx_mutex); | |
476 | list_add_tail(&req->list, &wil->back_tx_pending); | |
477 | mutex_unlock(&wil->back_tx_mutex); | |
478 | ||
479 | queue_work(wil->wq_service, &wil->back_tx_worker); | |
480 | ||
481 | return 0; | |
482 | } |