/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

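/* Find the rx buffer that was DMA-mapped at the given physical address.
 * Used by the in-order rx path, where the firmware refers to buffers by
 * physical address rather than by ring index.
 */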
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

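/* Allocate, DMA-map and post up to @num rx buffers, then publish the new
 * alloc index to the firmware. On -ENOMEM the buffers posted so far are
 * still handed over via the alloc index update below the fail label.
 */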
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 (u32)paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system's CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

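/* Fill the ring to its nominal fill level in one go. On failure the ring
 * is torn down so the caller can fail setup cleanly.
 */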
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);

	ath10k_htt_rx_ring_free(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

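/* Pop one buffer from the ring at the software read index and unmap it.
 * Caller must hold rx_ring.lock; if the ring is empty a warning is logged
 * and NULL is returned.
 */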
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	htt->rx_ring.paddrs_ring[idx] = 0;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be fewer than the actual
			 * MSDUs inside this MPDU. Mark the FW descriptors so
			 * that they will still be delivered to the upper
			 * stack, if there is no CRC error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

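/* In-order rx counterpart of ath10k_htt_rx_netbuf_pop(): look the buffer
 * up by the physical address reported by the firmware instead of by the
 * software read index.
 */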
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u32 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
					struct htt_rx_in_ord_ind *ev,
					struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

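/* Allocate and initialize the rx ring: buffer pointer array, DMA-coherent
 * paddr ring and alloc index, refill retry timer, lock and completion
 * queues. Undone by ath10k_htt_rx_free().
 */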
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

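/* Map the 4-bit L-SIG rate field reported by the hardware to an index
 * into the rate table registered with mac80211 (ath10k_rates[]): entries
 * 0x00-0x07 are CCK rates, 0x08-0x0F are OFDM rates.
 */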
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	enum ieee80211_band band;
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u32 info1, info2, info3;

	/* Band value can't be set as undefined but freq can be 0 - use that to
	 * determine whether band is provided.
	 *
	 * FIXME: Perhaps this can go away if CCK rate reporting is a little
	 * reworked?
	 */
	if (!status->freq)
		return;

	band = status->band;
	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using the same rate table that was
			   registered with the HW - ath10k_rates[]. On
			   5GHz the CCK rates are skipped, hence -4 here. */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		   TODO check this */
		mcs = (info3 >> 4) & 0x0F;
		nss = ((info2 >> 10) & 0x07) + 1;
		bw = info2 & 3;
		sgi = info3 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

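/* Fill in per-PPDU rx status. The first MPDU of a PPDU carries the
 * signal/channel/rate information and the last one carries the TSF, so
 * the status is reset and re-populated only on PPDU boundaries.
 */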
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->vht_nss = 0;
		status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
		status->flag &= ~(RX_FLAG_HT |
				  RX_FLAG_VHT |
				  RX_FLAG_SHORT_GI |
				  RX_FLAG_40MHZ |
				  RX_FLAG_MACTIME_END);
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu)
		ath10k_htt_rx_h_mactime(ar, status, rxd);
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This would
	 * also make sense for software based decryption (which is not
	 * implemented in ath10k).
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));

	/* MMIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - 8);

	/* Head */
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	memmove((void *)msdu->data + crypto_len,
		(void *)msdu->data, hdr_len);
	skb_pull(msdu, crypto_len);
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, 4) +
			   round_up(crypto_len, 4);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;
	struct ieee80211_hdr *hdr;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;
	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
		break;
	}
}

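/* Derive skb->ip_summed from the hardware checksum attention bits.
 * CHECKSUM_UNNECESSARY is reported only when the frame is IPv4/IPv6
 * TCP/UDP and both the IP and TCP/UDP checksums verified OK.
 */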
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	size_t hdr_len;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(first_hdr, hdr, hdr_len);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;
	qos = ieee80211_get_qos_ctl(hdr);
	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (is_decrypted)
		status->flag |= RX_FLAG_DECRYPTED |
				RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(ar, status, msdu);
	}
}

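/* Coalesce an MSDU that was chained across several ring buffers back into
 * the first skb so the rest of the rx path sees a single linear buffer.
 */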
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or a similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);
	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    bool chained)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (!chained)
		return;

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu);
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	bool is_mgmt;
	bool has_fcs_err;

	msdu = skb_peek(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);

	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
	 * invalid/dangerous frames.
	 */

	if (!rx_status->freq) {
		ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
	has_fcs_err = !!(rxd->attention.flags &
			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));

	/* Management frames are handled via WMI events. The pros of such
	 * approach is that channel is explicitly provided in WMI events
	 * whereas HTT doesn't provide channel information for Rxed frames.
	 *
	 * However some firmware revisions don't report corrupted frames via
	 * WMI so don't drop them.
	 */
	if (is_mgmt && !has_fcs_err) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	__skb_queue_purge(amsdu);
}

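/* Handle an HTT rx indication event: pop every advertised MPDU off the
 * ring as an A-MSDU and run it through the ppdu/unchain/filter/mpdu/
 * deliver pipeline. If the ring state becomes corrupted, rx stays
 * disabled (rx_confused) until HTT is re-initialized.
 */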
5e3dd157 KV |
1479 | static void ath10k_htt_rx_handler(struct ath10k_htt *htt, |
1480 | struct htt_rx_indication *rx) | |
1481 | { | |
7aa7a72a | 1482 | struct ath10k *ar = htt->ar; |
6df92a3d | 1483 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
5e3dd157 | 1484 | struct htt_rx_indication_mpdu_range *mpdu_ranges; |
9aa505d2 | 1485 | struct sk_buff_head amsdu; |
5e3dd157 KV |
1486 | int num_mpdu_ranges; |
1487 | int fw_desc_len; | |
1488 | u8 *fw_desc; | |
d540690d | 1489 | int i, ret, mpdu_count = 0; |
5e3dd157 | 1490 | |
45967089 MK |
1491 | lockdep_assert_held(&htt->rx_ring.lock); |
1492 | ||
e0bd7513 MK |
1493 | if (htt->rx_confused) |
1494 | return; | |
1495 | ||
5e3dd157 KV |
1496 | fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); |
1497 | fw_desc = (u8 *)&rx->fw_desc; | |
1498 | ||
1499 | num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), | |
1500 | HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); | |
1501 | mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); | |
1502 | ||
7aa7a72a | 1503 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", |
5e3dd157 KV |
1504 | rx, sizeof(*rx) + |
1505 | (sizeof(struct htt_rx_indication_mpdu_range) * | |
1506 | num_mpdu_ranges)); | |
1507 | ||
d540690d MK |
1508 | for (i = 0; i < num_mpdu_ranges; i++) |
1509 | mpdu_count += mpdu_ranges[i].mpdu_count; | |
1510 | ||
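/* Pop one A-MSDU per MPDU and run it through the rx processing
 * stages: PPDU status, unchaining, filtering, MPDU processing and
 * finally delivery.
 */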
1511 | while (mpdu_count--) { | |
d540690d MK |
1512 | __skb_queue_head_init(&amsdu); |
1513 | ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, | |
f0e2770f | 1514 | &fw_desc_len, &amsdu); |
d540690d | 1515 | if (ret < 0) { |
e0bd7513 | 1516 | ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); |
d540690d | 1517 | __skb_queue_purge(&amsdu); |
e0bd7513 MK |
1518 | /* FIXME: It's probably a good idea to reboot the |
1519 | * device instead of leaving it inoperable. | |
1520 | */ | |
1521 | htt->rx_confused = true; | |
1522 | break; | |
d540690d | 1523 | } |
5e3dd157 | 1524 | |
b9fd8a84 | 1525 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); |
581c25f8 MK |
1526 | ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); |
1527 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); | |
1528 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); | |
1529 | ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); | |
5e3dd157 KV |
1530 | } |
1531 | ||
6e712d42 | 1532 | tasklet_schedule(&htt->rx_replenish_task); |
5e3dd157 KV |
1533 | } |
1534 | ||
1535 | static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, | |
5b07e07f | 1536 | struct htt_rx_fragment_indication *frag) |
5e3dd157 | 1537 | { |
7aa7a72a | 1538 | struct ath10k *ar = htt->ar; |
6df92a3d | 1539 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
9aa505d2 | 1540 | struct sk_buff_head amsdu; |
d84dd60f | 1541 | int ret; |
5e3dd157 | 1542 | u8 *fw_desc; |
581c25f8 | 1543 | int fw_desc_len; |
5e3dd157 KV |
1544 | |
1545 | fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); | |
1546 | fw_desc = (u8 *)frag->fw_msdu_rx_desc; | |
1547 | ||
9aa505d2 | 1548 | __skb_queue_head_init(&amsdu); |
45967089 MK |
1549 | |
1550 | spin_lock_bh(&htt->rx_ring.lock); | |
d84dd60f | 1551 | ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, |
f0e2770f | 1552 | &amsdu); |
45967089 | 1553 | spin_unlock_bh(&htt->rx_ring.lock); |
5e3dd157 | 1554 | |
686687c9 MK |
1555 | tasklet_schedule(&htt->rx_replenish_task); |
1556 | ||
7aa7a72a | 1557 | ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); |
5e3dd157 | 1558 | |
d84dd60f | 1559 | if (ret) { |
7aa7a72a | 1560 | ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n", |
d84dd60f | 1561 | ret); |
9aa505d2 | 1562 | __skb_queue_purge(&amsdu); |
5e3dd157 KV |
1563 | return; |
1564 | } | |
1565 | ||
9aa505d2 MK |
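/* A fragment indication is expected to carry exactly one MSDU;
 * anything else is treated as an error.
 */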
1566 | if (skb_queue_len(&amsdu) != 1) { |
1567 | ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); | |
1568 | __skb_queue_purge(&amsdu); | |
1569 | return; | |
1570 | } | |
1571 | ||
89a5a317 | 1572 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); |
581c25f8 MK |
1573 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); |
1574 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); | |
1575 | ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); | |
5e3dd157 | 1576 | |
5e3dd157 | 1577 | if (fw_desc_len > 0) { |
7aa7a72a | 1578 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
5e3dd157 KV |
1579 | "expecting more fragmented rx in one indication %d\n", |
1580 | fw_desc_len); | |
1581 | } | |
1582 | } | |
1583 | ||
6c5151a9 MK |
1584 | static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, |
1585 | struct sk_buff *skb) | |
1586 | { | |
1587 | struct ath10k_htt *htt = &ar->htt; | |
1588 | struct htt_resp *resp = (struct htt_resp *)skb->data; | |
1589 | struct htt_tx_done tx_done = {}; | |
1590 | int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); | |
1591 | __le16 msdu_id; | |
1592 | int i; | |
1593 | ||
45967089 MK |
1594 | lockdep_assert_held(&htt->tx_lock); |
1595 | ||
6c5151a9 MK |
1596 | switch (status) { |
1597 | case HTT_DATA_TX_STATUS_NO_ACK: | |
1598 | tx_done.no_ack = true; | |
1599 | break; | |
1600 | case HTT_DATA_TX_STATUS_OK: | |
1601 | break; | |
1602 | case HTT_DATA_TX_STATUS_DISCARD: | |
1603 | case HTT_DATA_TX_STATUS_POSTPONE: | |
1604 | case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL: | |
1605 | tx_done.discard = true; | |
1606 | break; | |
1607 | default: | |
7aa7a72a | 1608 | ath10k_warn(ar, "unhandled tx completion status %d\n", status); |
6c5151a9 MK |
1609 | tx_done.discard = true; |
1610 | break; | |
1611 | } | |
1612 | ||
7aa7a72a | 1613 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", |
6c5151a9 MK |
1614 | resp->data_tx_completion.num_msdus); |
1615 | ||
1616 | for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { | |
1617 | msdu_id = resp->data_tx_completion.msdus[i]; | |
1618 | tx_done.msdu_id = __le16_to_cpu(msdu_id); | |
1619 | ath10k_txrx_tx_unref(htt, &tx_done); | |
1620 | } | |
1621 | } | |
1622 | ||
aa5b4fbc MK |
1623 | static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) |
1624 | { | |
1625 | struct htt_rx_addba *ev = &resp->rx_addba; | |
1626 | struct ath10k_peer *peer; | |
1627 | struct ath10k_vif *arvif; | |
1628 | u16 info0, tid, peer_id; | |
1629 | ||
1630 | info0 = __le16_to_cpu(ev->info0); | |
1631 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
1632 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
1633 | ||
7aa7a72a | 1634 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1635 | "htt rx addba tid %hu peer_id %hu size %hhu\n", |
1636 | tid, peer_id, ev->window_size); | |
1637 | ||
1638 | spin_lock_bh(&ar->data_lock); | |
1639 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
1640 | if (!peer) { | |
7aa7a72a | 1641 | ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", |
aa5b4fbc MK |
1642 | peer_id); |
1643 | spin_unlock_bh(&ar->data_lock); | |
1644 | return; | |
1645 | } | |
1646 | ||
1647 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
1648 | if (!arvif) { | |
7aa7a72a | 1649 | ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", |
aa5b4fbc MK |
1650 | peer->vdev_id); |
1651 | spin_unlock_bh(&ar->data_lock); | |
1652 | return; | |
1653 | } | |
1654 | ||
7aa7a72a | 1655 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1656 | "htt rx start rx ba session sta %pM tid %hu size %hhu\n", |
1657 | peer->addr, tid, ev->window_size); | |
1658 | ||
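/* Notify mac80211 that the firmware has started an rx block-ack
 * session for this station and TID.
 */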
1659 | ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
1660 | spin_unlock_bh(&ar->data_lock); | |
1661 | } | |
1662 | ||
1663 | static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) | |
1664 | { | |
1665 | struct htt_rx_delba *ev = &resp->rx_delba; | |
1666 | struct ath10k_peer *peer; | |
1667 | struct ath10k_vif *arvif; | |
1668 | u16 info0, tid, peer_id; | |
1669 | ||
1670 | info0 = __le16_to_cpu(ev->info0); | |
1671 | tid = MS(info0, HTT_RX_BA_INFO0_TID); | |
1672 | peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); | |
1673 | ||
7aa7a72a | 1674 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1675 | "htt rx delba tid %hu peer_id %hu\n", |
1676 | tid, peer_id); | |
1677 | ||
1678 | spin_lock_bh(&ar->data_lock); | |
1679 | peer = ath10k_peer_find_by_id(ar, peer_id); | |
1680 | if (!peer) { | |
7aa7a72a | 1681 | ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n", |
aa5b4fbc MK |
1682 | peer_id); |
1683 | spin_unlock_bh(&ar->data_lock); | |
1684 | return; | |
1685 | } | |
1686 | ||
1687 | arvif = ath10k_get_arvif(ar, peer->vdev_id); | |
1688 | if (!arvif) { | |
7aa7a72a | 1689 | ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n", |
aa5b4fbc MK |
1690 | peer->vdev_id); |
1691 | spin_unlock_bh(&ar->data_lock); | |
1692 | return; | |
1693 | } | |
1694 | ||
7aa7a72a | 1695 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
aa5b4fbc MK |
1696 | "htt rx stop rx ba session sta %pM tid %hu\n", |
1697 | peer->addr, tid); | |
1698 | ||
1699 | ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid); | |
1700 | spin_unlock_bh(&ar->data_lock); | |
1701 | } | |
1702 | ||
c545070e MK |
1703 | static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, |
1704 | struct sk_buff_head *amsdu) | |
1705 | { | |
1706 | struct sk_buff *msdu; | |
1707 | struct htt_rx_desc *rxd; | |
1708 | ||
1709 | if (skb_queue_empty(list)) | |
1710 | return -ENOBUFS; | |
1711 | ||
1712 | if (WARN_ON(!skb_queue_empty(amsdu))) | |
1713 | return -EINVAL; | |
1714 | ||
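/* Dequeue MSDUs until one with the LAST_MSDU flag set in its rx
 * descriptor is found - together they form a single A-MSDU. If the
 * list runs out before that flag is seen the A-MSDU is incomplete,
 * so the MSDUs are put back and -EAGAIN is returned below.
 */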
1715 | while ((msdu = __skb_dequeue(list))) { | |
1716 | __skb_queue_tail(amsdu, msdu); | |
1717 | ||
1718 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1719 | if (rxd->msdu_end.info0 & | |
1720 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) | |
1721 | break; | |
1722 | } | |
1723 | ||
1724 | msdu = skb_peek_tail(amsdu); | |
1725 | rxd = (void *)msdu->data - sizeof(*rxd); | |
1726 | if (!(rxd->msdu_end.info0 & | |
1727 | __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { | |
1728 | skb_queue_splice_init(amsdu, list); | |
1729 | return -EAGAIN; | |
1730 | } | |
1731 | ||
1732 | return 0; | |
1733 | } | |
1734 | ||
1735 | static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status, | |
1736 | struct sk_buff *skb) | |
1737 | { | |
1738 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | |
1739 | ||
1740 | if (!ieee80211_has_protected(hdr->frame_control)) | |
1741 | return; | |
1742 | ||
1743 | /* Offloaded frames are already decrypted but firmware insists they are | |
1744 | * protected in the 802.11 header. Strip the flag. Otherwise mac80211 | |
1745 | * will drop the frame. | |
1746 | */ | |
1747 | ||
1748 | hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); | |
1749 | status->flag |= RX_FLAG_DECRYPTED | | |
1750 | RX_FLAG_IV_STRIPPED | | |
1751 | RX_FLAG_MMIC_STRIPPED; | |
1752 | } | |
1753 | ||
1754 | static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar, | |
1755 | struct sk_buff_head *list) | |
1756 | { | |
1757 | struct ath10k_htt *htt = &ar->htt; | |
1758 | struct ieee80211_rx_status *status = &htt->rx_status; | |
1759 | struct htt_rx_offload_msdu *rx; | |
1760 | struct sk_buff *msdu; | |
1761 | size_t offset; | |
1762 | ||
1763 | while ((msdu = __skb_dequeue(list))) { | |
1764 | /* Offloaded frames don't have an Rx descriptor. Instead they have | |
1765 | * a short meta information header. | |
1766 | */ | |
1767 | ||
1768 | rx = (void *)msdu->data; | |
1769 | ||
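/* Account for the offload meta header and immediately skip past it
 * so msdu->data points at the payload it describes.
 */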
1770 | skb_put(msdu, sizeof(*rx)); | |
1771 | skb_pull(msdu, sizeof(*rx)); | |
1772 | ||
1773 | if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) { | |
1774 | ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n"); | |
1775 | dev_kfree_skb_any(msdu); | |
1776 | continue; | |
1777 | } | |
1778 | ||
1779 | skb_put(msdu, __le16_to_cpu(rx->msdu_len)); | |
1780 | ||
1781 | /* Offloaded rx header length isn't a multiple of 2 or 4 so the | |
1782 | * actual payload is unaligned. Align the frame. Otherwise | |
1783 | * mac80211 complains. This shouldn't reduce performance much | |
1784 | * because these offloaded frames are rare. | |
1785 | */ | |
1786 | offset = 4 - ((unsigned long)msdu->data & 3); | |
1787 | skb_put(msdu, offset); | |
1788 | memmove(msdu->data + offset, msdu->data, msdu->len); | |
1789 | skb_pull(msdu, offset); | |
1790 | ||
1791 | /* FIXME: The frame is NWifi. Re-construct QoS Control | |
1792 | * if possible later. | |
1793 | */ | |
1794 | ||
1795 | memset(status, 0, sizeof(*status)); | |
1796 | status->flag |= RX_FLAG_NO_SIGNAL_VAL; | |
1797 | ||
1798 | ath10k_htt_rx_h_rx_offload_prot(status, msdu); | |
1799 | ath10k_htt_rx_h_channel(ar, status); | |
1800 | ath10k_process_rx(ar, status, msdu); | |
1801 | } | |
1802 | } | |
1803 | ||
1804 | static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) | |
1805 | { | |
1806 | struct ath10k_htt *htt = &ar->htt; | |
1807 | struct htt_resp *resp = (void *)skb->data; | |
1808 | struct ieee80211_rx_status *status = &htt->rx_status; | |
1809 | struct sk_buff_head list; | |
1810 | struct sk_buff_head amsdu; | |
1811 | u16 peer_id; | |
1812 | u16 msdu_count; | |
1813 | u8 vdev_id; | |
1814 | u8 tid; | |
1815 | bool offload; | |
1816 | bool frag; | |
1817 | int ret; | |
1818 | ||
1819 | lockdep_assert_held(&htt->rx_ring.lock); | |
1820 | ||
1821 | if (htt->rx_confused) | |
1822 | return; | |
1823 | ||
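/* Strip the HTT header and the fixed part of the in-order indication
 * so that only the MSDU descriptors remain in the skb.
 */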
1824 | skb_pull(skb, sizeof(resp->hdr)); | |
1825 | skb_pull(skb, sizeof(resp->rx_in_ord_ind)); | |
1826 | ||
1827 | peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); | |
1828 | msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); | |
1829 | vdev_id = resp->rx_in_ord_ind.vdev_id; | |
1830 | tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); | |
1831 | offload = !!(resp->rx_in_ord_ind.info & | |
1832 | HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); | |
1833 | frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); | |
1834 | ||
1835 | ath10k_dbg(ar, ATH10K_DBG_HTT, | |
1836 | "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", | |
1837 | vdev_id, peer_id, tid, offload, frag, msdu_count); | |
1838 | ||
1839 | if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) { | |
1840 | ath10k_warn(ar, "dropping invalid in order rx indication\n"); | |
1841 | return; | |
1842 | } | |
1843 | ||
1844 | /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later | |
1845 | * extracted and processed. | |
1846 | */ | |
1847 | __skb_queue_head_init(&list); | |
1848 | ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list); | |
1849 | if (ret < 0) { | |
1850 | ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); | |
1851 | htt->rx_confused = true; | |
1852 | return; | |
1853 | } | |
1854 | ||
1855 | /* Offloaded frames are very different and need to be handled | |
1856 | * separately. | |
1857 | */ | |
1858 | if (offload) | |
1859 | ath10k_htt_rx_h_rx_offload(ar, &list); | |
1860 | ||
1861 | while (!skb_queue_empty(&list)) { | |
1862 | __skb_queue_head_init(&amsdu); | |
1863 | ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu); | |
1864 | switch (ret) { | |
1865 | case 0: | |
1866 | /* Note: The in-order indication may report interleaved | |
1867 | * frames from different PPDUs, meaning the reported rx rate | |
1868 | * to mac80211 isn't accurate/reliable. It's still | |
1869 | * better to report something than nothing though. This | |
1870 | * should still give an idea about rx rate to the user. | |
1871 | */ | |
1872 | ath10k_htt_rx_h_ppdu(ar, &amsdu, status); | |
1873 | ath10k_htt_rx_h_filter(ar, &amsdu, status); | |
1874 | ath10k_htt_rx_h_mpdu(ar, &amsdu, status); | |
1875 | ath10k_htt_rx_h_deliver(ar, &amsdu, status); | |
1876 | break; | |
1877 | case -EAGAIN: | |
1878 | /* fall through */ | |
1879 | default: | |
1880 | /* Should not happen. */ | |
1881 | ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); | |
1882 | htt->rx_confused = true; | |
1883 | __skb_queue_purge(&list); | |
1884 | return; | |
1885 | } | |
1886 | } | |
1887 | ||
1888 | tasklet_schedule(&htt->rx_replenish_task); | |
1889 | } | |
1890 | ||
5e3dd157 KV |
1891 | void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) |
1892 | { | |
edb8236d | 1893 | struct ath10k_htt *htt = &ar->htt; |
5e3dd157 KV |
1894 | struct htt_resp *resp = (struct htt_resp *)skb->data; |
1895 | ||
1896 | /* confirm alignment */ | |
1897 | if (!IS_ALIGNED((unsigned long)skb->data, 4)) | |
7aa7a72a | 1898 | ath10k_warn(ar, "unaligned htt message, expect trouble\n"); |
5e3dd157 | 1899 | |
7aa7a72a | 1900 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", |
5e3dd157 KV |
1901 | resp->hdr.msg_type); |
1902 | switch (resp->hdr.msg_type) { | |
1903 | case HTT_T2H_MSG_TYPE_VERSION_CONF: { | |
1904 | htt->target_version_major = resp->ver_resp.major; | |
1905 | htt->target_version_minor = resp->ver_resp.minor; | |
1906 | complete(&htt->target_version_received); | |
1907 | break; | |
1908 | } | |
6c5151a9 | 1909 | case HTT_T2H_MSG_TYPE_RX_IND: |
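/* The indication is queued and later processed (and freed) by the
 * txrx completion tasklet, hence the early return which skips the
 * dev_kfree_skb_any() at the end of this function.
 */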
45967089 MK |
1910 | spin_lock_bh(&htt->rx_ring.lock); |
1911 | __skb_queue_tail(&htt->rx_compl_q, skb); | |
1912 | spin_unlock_bh(&htt->rx_ring.lock); | |
6c5151a9 MK |
1913 | tasklet_schedule(&htt->txrx_compl_task); |
1914 | return; | |
5e3dd157 KV |
1915 | case HTT_T2H_MSG_TYPE_PEER_MAP: { |
1916 | struct htt_peer_map_event ev = { | |
1917 | .vdev_id = resp->peer_map.vdev_id, | |
1918 | .peer_id = __le16_to_cpu(resp->peer_map.peer_id), | |
1919 | }; | |
1920 | memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); | |
1921 | ath10k_peer_map_event(htt, &ev); | |
1922 | break; | |
1923 | } | |
1924 | case HTT_T2H_MSG_TYPE_PEER_UNMAP: { | |
1925 | struct htt_peer_unmap_event ev = { | |
1926 | .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), | |
1927 | }; | |
1928 | ath10k_peer_unmap_event(htt, &ev); | |
1929 | break; | |
1930 | } | |
1931 | case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { | |
1932 | struct htt_tx_done tx_done = {}; | |
1933 | int status = __le32_to_cpu(resp->mgmt_tx_completion.status); | |
1934 | ||
1935 | tx_done.msdu_id = | |
1936 | __le32_to_cpu(resp->mgmt_tx_completion.desc_id); | |
1937 | ||
1938 | switch (status) { | |
1939 | case HTT_MGMT_TX_STATUS_OK: | |
1940 | break; | |
1941 | case HTT_MGMT_TX_STATUS_RETRY: | |
1942 | tx_done.no_ack = true; | |
1943 | break; | |
1944 | case HTT_MGMT_TX_STATUS_DROP: | |
1945 | tx_done.discard = true; | |
1946 | break; | |
1947 | } | |
1948 | ||
6c5151a9 | 1949 | spin_lock_bh(&htt->tx_lock); |
0a89f8a0 | 1950 | ath10k_txrx_tx_unref(htt, &tx_done); |
6c5151a9 | 1951 | spin_unlock_bh(&htt->tx_lock); |
5e3dd157 KV |
1952 | break; |
1953 | } | |
6c5151a9 MK |
1954 | case HTT_T2H_MSG_TYPE_TX_COMPL_IND: |
1955 | spin_lock_bh(&htt->tx_lock); | |
1956 | __skb_queue_tail(&htt->tx_compl_q, skb); | |
1957 | spin_unlock_bh(&htt->tx_lock); | |
1958 | tasklet_schedule(&htt->txrx_compl_task); | |
1959 | return; | |
5e3dd157 KV |
1960 | case HTT_T2H_MSG_TYPE_SEC_IND: { |
1961 | struct ath10k *ar = htt->ar; | |
1962 | struct htt_security_indication *ev = &resp->security_indication; | |
1963 | ||
7aa7a72a | 1964 | ath10k_dbg(ar, ATH10K_DBG_HTT, |
5e3dd157 KV |
1965 | "sec ind peer_id %d unicast %d type %d\n", |
1966 | __le16_to_cpu(ev->peer_id), | |
1967 | !!(ev->flags & HTT_SECURITY_IS_UNICAST), | |
1968 | MS(ev->flags, HTT_SECURITY_TYPE)); | |
1969 | complete(&ar->install_key_done); | |
1970 | break; | |
1971 | } | |
1972 | case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { | |
7aa7a72a | 1973 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 KV |
1974 | skb->data, skb->len); |
1975 | ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); | |
1976 | break; | |
1977 | } | |
1978 | case HTT_T2H_MSG_TYPE_TEST: | |
1979 | /* FIX THIS */ | |
1980 | break; | |
5e3dd157 | 1981 | case HTT_T2H_MSG_TYPE_STATS_CONF: |
d35a6c18 | 1982 | trace_ath10k_htt_stats(ar, skb->data, skb->len); |
a9bf0506 KV |
1983 | break; |
1984 | case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: | |
708b9bde MK |
1985 | /* Firmware can return tx frames if it's unable to fully |
1986 | * process them and suspects the host may be able to fix it. ath10k | |
1987 | * sends all tx frames as already inspected, so this shouldn't | |
1988 | * happen unless the fw has a bug. | |
1989 | */ | |
7aa7a72a | 1990 | ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); |
708b9bde | 1991 | break; |
5e3dd157 | 1992 | case HTT_T2H_MSG_TYPE_RX_ADDBA: |
aa5b4fbc MK |
1993 | ath10k_htt_rx_addba(ar, resp); |
1994 | break; | |
5e3dd157 | 1995 | case HTT_T2H_MSG_TYPE_RX_DELBA: |
aa5b4fbc MK |
1996 | ath10k_htt_rx_delba(ar, resp); |
1997 | break; | |
bfdd7937 RM |
1998 | case HTT_T2H_MSG_TYPE_PKTLOG: { |
1999 | struct ath10k_pktlog_hdr *hdr = | |
2000 | (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; | |
2001 | ||
2002 | trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, | |
2003 | sizeof(*hdr) + | |
2004 | __le16_to_cpu(hdr->size)); | |
2005 | break; | |
2006 | } | |
aa5b4fbc MK |
2007 | case HTT_T2H_MSG_TYPE_RX_FLUSH: { |
2008 | /* Ignore this event because mac80211 takes care of Rx | |
2009 | * aggregation reordering. | |
2010 | */ | |
2011 | break; | |
2012 | } | |
c545070e MK |
2013 | case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { |
2014 | spin_lock_bh(&htt->rx_ring.lock); | |
2015 | __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); | |
2016 | spin_unlock_bh(&htt->rx_ring.lock); | |
2017 | tasklet_schedule(&htt->txrx_compl_task); | |
2018 | return; | |
2019 | } | |
2020 | case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: | |
2021 | /* FIXME: This WMI-TLV event is overlapping with 10.2 | |
2022 | * CHAN_CHANGE - both being 0xF. Neither is being used in | |
2023 | * practice so no immediate action is necessary. Nevertheless | |
2024 | * HTT may need an abstraction layer like WMI has one day. | |
2025 | */ | |
2026 | break; | |
5e3dd157 | 2027 | default: |
2358a544 MK |
2028 | ath10k_warn(ar, "htt event (%d) not handled\n", |
2029 | resp->hdr.msg_type); | |
7aa7a72a | 2030 | ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", |
5e3dd157 KV |
2031 | skb->data, skb->len); |
2032 | break; | |
2033 | } |
2034 | ||
2035 | /* Free the indication buffer */ | |
2036 | dev_kfree_skb_any(skb); | |
2037 | } | |
6c5151a9 MK |
2038 | |
2039 | static void ath10k_htt_txrx_compl_task(unsigned long ptr) | |
2040 | { | |
2041 | struct ath10k_htt *htt = (struct ath10k_htt *)ptr; | |
c545070e | 2042 | struct ath10k *ar = htt->ar; |
6c5151a9 MK |
2043 | struct htt_resp *resp; |
2044 | struct sk_buff *skb; | |
2045 | ||
45967089 MK |
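/* Drain the tx completion, rx indication and in-order rx indication
 * queues that were filled by ath10k_htt_t2h_msg_handler().
 */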
2046 | spin_lock_bh(&htt->tx_lock); |
2047 | while ((skb = __skb_dequeue(&htt->tx_compl_q))) { | |
6c5151a9 MK |
2048 | ath10k_htt_rx_frm_tx_compl(htt->ar, skb); |
2049 | dev_kfree_skb_any(skb); | |
2050 | } | |
45967089 | 2051 | spin_unlock_bh(&htt->tx_lock); |
6c5151a9 | 2052 | |
45967089 MK |
2053 | spin_lock_bh(&htt->rx_ring.lock); |
2054 | while ((skb = __skb_dequeue(&htt->rx_compl_q))) { | |
6c5151a9 MK |
2055 | resp = (struct htt_resp *)skb->data; |
2056 | ath10k_htt_rx_handler(htt, &resp->rx_ind); | |
2057 | dev_kfree_skb_any(skb); | |
2058 | } | |
c545070e MK |
2059 | |
2060 | while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { | |
2061 | ath10k_htt_rx_in_ord_ind(ar, skb); | |
2062 | dev_kfree_skb_any(skb); | |
2063 | } | |
45967089 | 2064 | spin_unlock_bh(&htt->rx_ring.lock); |
6c5151a9 | 2065 | } |