/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
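
/*
 * Illustrative arithmetic for the sizing above (added note, not from the
 * original source): assuming max_throughput_mbps = 1000, the formula gives
 * 1000 * 1000 / (8 * 1000) = 125 frames per ms, times the 20 ms worst-case
 * host latency = 2500 entries. That exceeds HTT_RX_RING_SIZE_MAX, so the
 * ring is clamped to 2048, which is already a power of two.
 */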

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000 /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
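
/*
 * Illustrative arithmetic (added note, not from the original source): with
 * the same assumed 1000 mbps, 125 frames per ms times the 10 ms "worst
 * likely" latency gives a fill level of 1250 - comfortably below the
 * 2048-entry ring, so the size - 1 cap does not kick in.
 */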

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
	return ret;
}
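
/*
 * Note (added for clarity; based on how alloc_idx is allocated and
 * configured elsewhere in the driver): alloc_idx.vaddr lives in DMA-coherent
 * memory whose physical address is handed to the target during rx ring
 * setup, so the write above publishes the new producer index to the
 * firmware. The "idx &= size_mask" wrap works only because the ring size is
 * guaranteed to be a power of two.
 */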

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there are not enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load wrt to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}
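
/*
 * Worked example (illustrative; assumes, for the sake of the numbers, that
 * ATH10K_HTT_MAX_NUM_REFILL in htt.h is 16): with fill_level = 1000 and
 * fill_cnt = 600 the deficit is 400, so one pass posts 16 buffers and
 * reschedules the tasklet, letting RX processing run between the
 * 16-buffer refill chunks.
 */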

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
{
	return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
		htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
}
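
/*
 * Note (added for clarity): the subtraction is computed modulo the
 * power-of-two ring size, so it stays correct across index wrap-around,
 * e.g. alloc_idx = 5, sw_rd_idx = 2045, size = 2048 gives
 * (5 - 2045) & 2047 = 8 filled elements.
 */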

void ath10k_htt_rx_detach(struct ath10k_htt *htt)
{
	int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
		struct sk_buff *skb =
				htt->rx_ring.netbufs_ring[sw_rd_idx];
		struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
		sw_rd_idx++;
		sw_rd_idx &= htt->rx_ring.size_mask;
	}

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (ath10k_htt_rx_ring_elems(htt) == 0)
		ath10k_warn("htt rx ring is empty!\n");

	if (htt->rx_confused) {
		ath10k_warn("htt is confused. refusing rx\n");
		return 0;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err("htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frame is still delivered
			 * to the upper stack if there is no CRC error for
			 * this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
					"htt rx chained: ", next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		if (last_msdu) {
			msdu->next = NULL;
			break;
		} else {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
			msdu->next = next;
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_attach(struct ath10k_htt *htt)
{
	dma_addr_t paddr;
	void *vaddr;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn("htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   (htt->rx_ring.size *
				    sizeof(htt->rx_ring.paddrs_ring)),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}
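
/*
 * Note (added for clarity): the values above are the per-MPDU security
 * header lengths inserted after the 802.11 header - a 4 byte IV + key ID
 * for WEP and an 8 byte (extended) IV for TKIP/CCMP - while the tail
 * helper below returns the trailing ICV/MIC lengths (4 bytes for TKIP's
 * ICV, 8 bytes for CCMP's MIC).
 */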

static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn("unknown encryption type %d\n", type);
	return 0;
}

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;
	else
		return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}
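
/*
 * Note (added for clarity): bit 7 of the first QoS control byte is the
 * A-MSDU Present flag; the 0x80 literal above matches mac80211's
 * IEEE80211_QOS_CTL_A_MSDU_PRESENT definition.
 */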

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
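
/*
 * For reference (added, not in the original source): for an IPv4 payload
 * the rfc1042_hdr bytes are AA AA 03 00 00 00 08 00, i.e. LLC DSAP/SSAP
 * 0xAA, control 0x03, a zero OUI and a 0x0800 snap_type.
 */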

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct htt_rx_info *info)
{
	struct htt_rx_desc *rxd;
	struct sk_buff *first;
	struct sk_buff *skb = info->skb;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], addr[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
					4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* the original header has the A-MSDU bit set but
			 * we're not including the A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA */
			memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header, making this a
			 * single-subframe A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		info->skb = skb;
		info->encrypt_type = enctype;
		skb = skb->next;
		info->skb->next = NULL;

		if (skb)
			info->amsdu_more = true;

		ath10k_process_rx(htt->ar, info);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
{
	struct sk_buff *skb = info->skb;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn("htt rx received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
			RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	info->skb = skb;
	info->encrypt_type = enctype;

	ath10k_process_rx(htt->ar, info);
}

static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
		return true;

	return false;
}

static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);

	if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
		return true;

	return false;
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
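
/*
 * Note (added for clarity): CHECKSUM_UNNECESSARY tells the network stack
 * that the hardware already validated the IP and TCP/UDP checksums, so
 * software verification is skipped; CHECKSUM_NONE makes the stack verify
 * them itself.
 */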

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct htt_rx_info info;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	int fw_desc_len;
	u8 *fw_desc;
	int i, j;

	lockdep_assert_held(&htt->rx_ring.lock);

	memset(&info, 0, sizeof(info));

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		info.status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;
			enum htt_rx_mpdu_status status;
			int msdu_chaining;

			msdu_head = NULL;
			msdu_tail = NULL;
			msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
							&fw_desc,
							&fw_desc_len,
							&msdu_head,
							&msdu_tail);

			if (!msdu_head) {
				ath10k_warn("htt rx no data!\n");
				continue;
			}

			if (msdu_head->len == 0) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to zero-len\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx dropping due to decrypt-err\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			status = info.status;

			/* Skip mgmt frames while we handle this in WMI */
			if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
			    ath10k_htt_rx_is_mgmt(msdu_head)) {
				ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (status != HTT_RX_IND_MPDU_STATUS_OK &&
			    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
			    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
			    !htt->ar->monitor_enabled) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx ignoring frame w/ status %d\n",
					   status);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx CAC running\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			/* FIXME: we do not support chaining yet.
			 * this needs investigation */
			if (msdu_chaining) {
				ath10k_warn("htt rx msdu_chaining is true\n");
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			info.skb = msdu_head;
			info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
			info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);

			if (info.fcs_err)
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx has FCS err\n");

			if (info.mic_err)
				ath10k_dbg(ATH10K_DBG_HTT,
					   "htt rx has MIC err\n");

			info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
			info.signal += rx->ppdu.combined_rssi;

			info.rate.info0 = rx->ppdu.info0;
			info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
			info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
			info.tsf = __le32_to_cpu(rx->ppdu.tsf);

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, &info);
			else
				ath10k_htt_rx_msdu(htt, &info);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}
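
/*
 * Illustrative signal computation (added; assumes the driver's
 * ATH10K_DEFAULT_NOISE_FLOOR is -95 dBm, which is an assumption here): a
 * combined_rssi of 40 dB above the noise floor yields
 * info.signal = -95 + 40 = -55 dBm.
 */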

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct sk_buff *msdu_head, *msdu_tail;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct htt_rx_info info = {};
	struct ieee80211_hdr *hdr;
	int msdu_chaining;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;

	spin_lock_bh(&htt->rx_ring.lock);
	msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
						&msdu_head, &msdu_tail);
	spin_unlock_bh(&htt->rx_ring.lock);

	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (!msdu_head) {
		ath10k_warn("htt rx frag no data\n");
		return;
	}

	if (msdu_chaining || msdu_head != msdu_tail) {
		ath10k_warn("aggregation with fragmentation?!\n");
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
				RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn("we dont support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	info.skb = msdu_head;
	info.status = HTT_RX_IND_MPDU_STATUS_OK;
	info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
				RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);

	if (tkip_mic_err) {
		ath10k_warn("tkip mic error\n");
		info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
	}

	if (decrypt_err) {
		ath10k_warn("decryption err in fragmented rx\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);

		/* It is more efficient to move the header than the payload */
		memmove((void *)info.skb->data + paramlen,
			(void *)info.skb->data,
			hdrlen);
		skb_pull(info.skb, paramlen);
		hdr = (struct ieee80211_hdr *)info.skb->data;
	}

	/* remove trailing FCS */
	trim = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > info.skb->len) {
		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(info.skb);
		goto end;
	}

	skb_trim(info.skb, info.skb->len - trim);

	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			info.skb->data, info.skb->len);
	ath10k_process_rx(htt->ar, &info);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}
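
/*
 * Worked trim example (illustrative, derived from the code above): for the
 * last fragment of a TKIP frame the tail strip is 4 (FCS) + 4 (TKIP ICV)
 * + 8 (Michael MIC) = 16 bytes; for a CCMP fragment it is
 * 4 (FCS) + 8 (MIC) = 12 bytes.
 */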

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn("unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("unaligned htt message, expect trouble\n");

	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	case HTT_T2H_MSG_TYPE_RX_DELBA:
	case HTT_T2H_MSG_TYPE_RX_FLUSH:
	default:
		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
			   resp->hdr.msg_type);
		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}