Commit | Line | Data |
---|---|---|
5e3dd157 KV |
1 | /* |
2 | * Copyright (c) 2005-2011 Atheros Communications Inc. | |
3 | * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. | |
4 | * | |
5 | * Permission to use, copy, modify, and/or distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include <linux/etherdevice.h> | |
19 | #include "htt.h" | |
20 | #include "mac.h" | |
21 | #include "hif.h" | |
22 | #include "txrx.h" | |
23 | #include "debug.h" | |
24 | ||
/* Decrement the pending-tx accounting after a frame completes or fails.
 *
 * Caller must hold htt->tx_lock (unlocked variant; see
 * ath10k_htt_tx_dec_pending for the locking wrapper).
 *
 * @limit_mgmt_desc: true if this frame was counted against the management
 *	descriptor budget on enqueue, so the matching counter is dropped too.
 *
 * When the queue drops back below max_num_pending_tx the Q_FULL pause
 * previously placed on the mac80211 queues is lifted.
 */
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
	/* We just left the completely-full state: resume tx queues. */
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
34 | ||
7b7da0a0 VN |
/* Locking wrapper around __ath10k_htt_tx_dec_pending: takes htt->tx_lock
 * for callers that do not already hold it.
 */
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
				      bool limit_mgmt_desc)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
	spin_unlock_bh(&htt->tx_lock);
}
42 | ||
7b7da0a0 VN |
/* Reserve a pending-tx slot before queuing a frame to the firmware.
 *
 * @limit_mgmt_desc: count this frame against the management frame budget.
 * @is_probe_resp: frame is a probe response; these are additionally capped
 *	by hw_params.max_probe_resp_desc_thres so a probe-response flood
 *	cannot exhaust the descriptors.
 *
 * Returns 0 on success or -EBUSY when either the global tx limit or the
 * probe-response threshold is exceeded.  On success the caller is committed
 * to eventually calling ath10k_htt_tx_dec_pending() (done via the error
 * unwind paths in the tx functions or on tx completion).
 */
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
				     bool limit_mgmt_desc, bool is_probe_resp)
{
	struct ath10k *ar = htt->ar;
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	if (limit_mgmt_desc) {
		/* Only probe responses are rejected at the threshold; other
		 * management frames are counted but not capped here.
		 */
		if (is_probe_resp && (htt->num_pending_mgmt_tx >
		    ar->hw_params.max_probe_resp_desc_thres)) {
			ret = -EBUSY;
			goto exit;
		}
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	/* Queue just became full: pause mac80211 tx until completions drain. */
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}
73 | ||
89d6d835 | 74 | int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) |
5e3dd157 | 75 | { |
7aa7a72a | 76 | struct ath10k *ar = htt->ar; |
89d6d835 | 77 | int ret; |
5e3dd157 KV |
78 | |
79 | lockdep_assert_held(&htt->tx_lock); | |
80 | ||
fbc03a46 PO |
81 | ret = idr_alloc(&htt->pending_tx, skb, 0, |
82 | htt->max_num_pending_tx, GFP_ATOMIC); | |
89d6d835 MK |
83 | |
84 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); | |
5e3dd157 | 85 | |
89d6d835 | 86 | return ret; |
5e3dd157 KV |
87 | } |
88 | ||
/* Release a previously allocated msdu_id back to the pending-tx IDR.
 *
 * Caller must hold htt->tx_lock.  Does not free the skb the id mapped
 * to; ownership of the skb is handled by the caller / completion path.
 */
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}
99 | ||
/* Set up the HTT tx side: pending-tx IDR plus the DMA-coherent per-msdu
 * tx buffer array, and (for hardware with continuous_frag_desc) the
 * fragment descriptor bank.
 *
 * Returns 0 on success or -ENOMEM, with everything already allocated
 * rolled back via the goto cleanup chain.  Paired with
 * ath10k_htt_tx_free().
 */
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	/* One ath10k_htt_txbuf per possible msdu_id, indexed directly. */
	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
					      &htt->txbuf.paddr,
					      GFP_DMA);
	if (!htt->txbuf.vaddr) {
		ath10k_err(ar, "failed to alloc tx buffer\n");
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	/* NOTE: this reuses 'size', so the free_txbuf path below must
	 * recompute the txbuf size rather than rely on it.
	 */
	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_txbuf;
	}

skip_frag_desc_alloc:
	return 0;

free_txbuf:
	/* 'size' was clobbered by the frag desc attempt; recompute. */
	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
			  htt->txbuf.paddr);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
146 | ||
89d6d835 | 147 | static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) |
5e3dd157 | 148 | { |
89d6d835 MK |
149 | struct ath10k *ar = ctx; |
150 | struct ath10k_htt *htt = &ar->htt; | |
0a89f8a0 | 151 | struct htt_tx_done tx_done = {0}; |
5e3dd157 | 152 | |
89d6d835 | 153 | ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); |
5e3dd157 | 154 | |
89d6d835 MK |
155 | tx_done.discard = 1; |
156 | tx_done.msdu_id = msdu_id; | |
5e3dd157 | 157 | |
89d6d835 | 158 | ath10k_txrx_tx_unref(htt, &tx_done); |
89d6d835 MK |
159 | |
160 | return 0; | |
5e3dd157 KV |
161 | } |
162 | ||
/* Tear down the HTT tx side: discard any frames still pending in the
 * IDR, destroy it, and free the DMA-coherent tx buffer and fragment
 * descriptor arrays if they were allocated.  Inverse of
 * ath10k_htt_tx_alloc().
 */
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	/* Complete-with-discard every msdu still in flight before
	 * destroying the IDR so no references leak.
	 */
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);

	if (htt->txbuf.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct ath10k_htt_txbuf);
		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
				  htt->txbuf.paddr);
	}

	/* frag_desc is only allocated when hw supports continuous frag
	 * descriptors, hence the NULL check.
	 */
	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}
184 | ||
/* HTC tx-completion callback for HTT command skbs: the command has been
 * handed to the hardware, so the skb is simply freed.
 */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
189 | ||
3f0f7ed4 RM |
/* HIF tx-completion callback for skbs sent directly through the HIF
 * layer (bypassing HTC): just free the skb.  Exported for use by the
 * bus-specific (e.g. PCI) modules.
 */
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
195 | ||
5e3dd157 KV |
196 | int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) |
197 | { | |
7aa7a72a | 198 | struct ath10k *ar = htt->ar; |
5e3dd157 KV |
199 | struct sk_buff *skb; |
200 | struct htt_cmd *cmd; | |
201 | int len = 0; | |
202 | int ret; | |
203 | ||
204 | len += sizeof(cmd->hdr); | |
205 | len += sizeof(cmd->ver_req); | |
206 | ||
7aa7a72a | 207 | skb = ath10k_htc_alloc_skb(ar, len); |
5e3dd157 KV |
208 | if (!skb) |
209 | return -ENOMEM; | |
210 | ||
211 | skb_put(skb, len); | |
212 | cmd = (struct htt_cmd *)skb->data; | |
213 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; | |
214 | ||
cd003fad | 215 | ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); |
5e3dd157 KV |
216 | if (ret) { |
217 | dev_kfree_skb_any(skb); | |
218 | return ret; | |
219 | } | |
220 | ||
221 | return 0; | |
222 | } | |
223 | ||
a3d135e5 KV |
224 | int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) |
225 | { | |
7aa7a72a | 226 | struct ath10k *ar = htt->ar; |
a3d135e5 KV |
227 | struct htt_stats_req *req; |
228 | struct sk_buff *skb; | |
229 | struct htt_cmd *cmd; | |
230 | int len = 0, ret; | |
231 | ||
232 | len += sizeof(cmd->hdr); | |
233 | len += sizeof(cmd->stats_req); | |
234 | ||
7aa7a72a | 235 | skb = ath10k_htc_alloc_skb(ar, len); |
a3d135e5 KV |
236 | if (!skb) |
237 | return -ENOMEM; | |
238 | ||
239 | skb_put(skb, len); | |
240 | cmd = (struct htt_cmd *)skb->data; | |
241 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; | |
242 | ||
243 | req = &cmd->stats_req; | |
244 | ||
245 | memset(req, 0, sizeof(*req)); | |
246 | ||
247 | /* currently we support only max 8 bit masks so no need to worry | |
248 | * about endian support */ | |
249 | req->upload_types[0] = mask; | |
250 | req->reset_types[0] = mask; | |
251 | req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; | |
252 | req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); | |
253 | req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32); | |
254 | ||
a3d135e5 KV |
255 | ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); |
256 | if (ret) { | |
7aa7a72a MK |
257 | ath10k_warn(ar, "failed to send htt type stats request: %d", |
258 | ret); | |
a3d135e5 KV |
259 | dev_kfree_skb_any(skb); |
260 | return ret; | |
261 | } | |
262 | ||
263 | return 0; | |
264 | } | |
265 | ||
d9156b5f RM |
266 | int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) |
267 | { | |
268 | struct ath10k *ar = htt->ar; | |
269 | struct sk_buff *skb; | |
270 | struct htt_cmd *cmd; | |
271 | int ret, size; | |
272 | ||
273 | if (!ar->hw_params.continuous_frag_desc) | |
274 | return 0; | |
275 | ||
276 | if (!htt->frag_desc.paddr) { | |
277 | ath10k_warn(ar, "invalid frag desc memory\n"); | |
278 | return -EINVAL; | |
279 | } | |
280 | ||
281 | size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg); | |
282 | skb = ath10k_htc_alloc_skb(ar, size); | |
283 | if (!skb) | |
284 | return -ENOMEM; | |
285 | ||
286 | skb_put(skb, size); | |
287 | cmd = (struct htt_cmd *)skb->data; | |
288 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; | |
289 | cmd->frag_desc_bank_cfg.info = 0; | |
290 | cmd->frag_desc_bank_cfg.num_banks = 1; | |
291 | cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); | |
292 | cmd->frag_desc_bank_cfg.bank_base_addrs[0] = | |
293 | __cpu_to_le32(htt->frag_desc.paddr); | |
fbc03a46 | 294 | cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; |
d9156b5f RM |
295 | cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = |
296 | __cpu_to_le16(htt->max_num_pending_tx - 1); | |
297 | ||
298 | ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); | |
299 | if (ret) { | |
300 | ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", | |
301 | ret); | |
302 | dev_kfree_skb_any(skb); | |
303 | return ret; | |
304 | } | |
305 | ||
306 | return 0; | |
307 | } | |
308 | ||
5e3dd157 KV |
/* Send the HTT RX_RING_CFG command describing the single host rx ring
 * to the firmware: ring base/size, per-buffer size, which rx_desc
 * sections the firmware should populate, and the 4-byte-word offsets of
 * those sections within struct htt_rx_desc.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * ath10k_htc_send() error (the skb is freed here on send failure).
 */
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	/* Seed the firmware's initial ring index from the current value
	 * of the shared alloc index.
	 */
	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

/* Offsets are expressed in 4-byte words, as the firmware expects. */
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
394 | ||
d385623a JD |
395 | int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, |
396 | u8 max_subfrms_ampdu, | |
397 | u8 max_subfrms_amsdu) | |
398 | { | |
7aa7a72a | 399 | struct ath10k *ar = htt->ar; |
d385623a JD |
400 | struct htt_aggr_conf *aggr_conf; |
401 | struct sk_buff *skb; | |
402 | struct htt_cmd *cmd; | |
403 | int len; | |
404 | int ret; | |
405 | ||
406 | /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ | |
407 | ||
408 | if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) | |
409 | return -EINVAL; | |
410 | ||
411 | if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) | |
412 | return -EINVAL; | |
413 | ||
414 | len = sizeof(cmd->hdr); | |
415 | len += sizeof(cmd->aggr_conf); | |
416 | ||
7aa7a72a | 417 | skb = ath10k_htc_alloc_skb(ar, len); |
d385623a JD |
418 | if (!skb) |
419 | return -ENOMEM; | |
420 | ||
421 | skb_put(skb, len); | |
422 | cmd = (struct htt_cmd *)skb->data; | |
423 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; | |
424 | ||
425 | aggr_conf = &cmd->aggr_conf; | |
426 | aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; | |
427 | aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; | |
428 | ||
7aa7a72a | 429 | ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", |
d385623a JD |
430 | aggr_conf->max_num_amsdu_subframes, |
431 | aggr_conf->max_num_ampdu_subframes); | |
432 | ||
433 | ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); | |
434 | if (ret) { | |
435 | dev_kfree_skb_any(skb); | |
436 | return ret; | |
437 | } | |
438 | ||
439 | return 0; | |
440 | } | |
441 | ||
5e3dd157 KV |
442 | int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) |
443 | { | |
7aa7a72a MK |
444 | struct ath10k *ar = htt->ar; |
445 | struct device *dev = ar->dev; | |
5e3dd157 KV |
446 | struct sk_buff *txdesc = NULL; |
447 | struct htt_cmd *cmd; | |
1f8bb151 | 448 | struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); |
5e00d31a | 449 | u8 vdev_id = skb_cb->vdev_id; |
5e3dd157 KV |
450 | int len = 0; |
451 | int msdu_id = -1; | |
452 | int res; | |
7b7da0a0 VN |
453 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; |
454 | bool limit_mgmt_desc = false; | |
455 | bool is_probe_resp = false; | |
456 | ||
457 | if (ar->hw_params.max_probe_resp_desc_thres) { | |
458 | limit_mgmt_desc = true; | |
459 | ||
460 | if (ieee80211_is_probe_resp(hdr->frame_control)) | |
461 | is_probe_resp = true; | |
462 | } | |
463 | ||
464 | res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); | |
5e3dd157 | 465 | |
5e3dd157 | 466 | if (res) |
2f3773bc | 467 | goto err; |
5e3dd157 KV |
468 | |
469 | len += sizeof(cmd->hdr); | |
470 | len += sizeof(cmd->mgmt_tx); | |
471 | ||
5e3dd157 | 472 | spin_lock_bh(&htt->tx_lock); |
89d6d835 | 473 | res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); |
005fb161 | 474 | spin_unlock_bh(&htt->tx_lock); |
b9e284e5 | 475 | if (res < 0) |
2f3773bc | 476 | goto err_tx_dec; |
b9e284e5 | 477 | |
2f3773bc | 478 | msdu_id = res; |
5e3dd157 | 479 | |
90eceb3b T |
480 | if ((ieee80211_is_action(hdr->frame_control) || |
481 | ieee80211_is_deauth(hdr->frame_control) || | |
482 | ieee80211_is_disassoc(hdr->frame_control)) && | |
483 | ieee80211_has_protected(hdr->frame_control)) { | |
484 | skb_put(msdu, IEEE80211_CCMP_MIC_LEN); | |
485 | } | |
486 | ||
7aa7a72a | 487 | txdesc = ath10k_htc_alloc_skb(ar, len); |
2f3773bc MK |
488 | if (!txdesc) { |
489 | res = -ENOMEM; | |
490 | goto err_free_msdu_id; | |
491 | } | |
492 | ||
767d34fc MK |
493 | skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, |
494 | DMA_TO_DEVICE); | |
495 | res = dma_mapping_error(dev, skb_cb->paddr); | |
5e55e3cb MK |
496 | if (res) { |
497 | res = -EIO; | |
2f3773bc | 498 | goto err_free_txdesc; |
5e55e3cb | 499 | } |
5e3dd157 KV |
500 | |
501 | skb_put(txdesc, len); | |
502 | cmd = (struct htt_cmd *)txdesc->data; | |
1d0088f8 RM |
503 | memset(cmd, 0, len); |
504 | ||
5e3dd157 KV |
505 | cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; |
506 | cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); | |
507 | cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); | |
508 | cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); | |
509 | cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); | |
510 | memcpy(cmd->mgmt_tx.hdr, msdu->data, | |
511 | min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); | |
512 | ||
a16942e6 | 513 | skb_cb->htt.txbuf = NULL; |
1f8bb151 | 514 | |
cd003fad | 515 | res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); |
5e3dd157 | 516 | if (res) |
2f3773bc | 517 | goto err_unmap_msdu; |
5e3dd157 KV |
518 | |
519 | return 0; | |
520 | ||
2f3773bc | 521 | err_unmap_msdu: |
767d34fc | 522 | dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); |
2f3773bc MK |
523 | err_free_txdesc: |
524 | dev_kfree_skb_any(txdesc); | |
525 | err_free_msdu_id: | |
526 | spin_lock_bh(&htt->tx_lock); | |
2f3773bc MK |
527 | ath10k_htt_tx_free_msdu_id(htt, msdu_id); |
528 | spin_unlock_bh(&htt->tx_lock); | |
529 | err_tx_dec: | |
7b7da0a0 | 530 | ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); |
2f3773bc | 531 | err: |
5e3dd157 KV |
532 | return res; |
533 | } | |
534 | ||
/* Transmit an msdu via the HTT TX_FRM data path.
 *
 * Builds the HTC+HTT command header and fragment list in the
 * preallocated per-msdu txbuf (or the continuous fragment descriptor
 * bank when the hw supports it) and pushes header and payload to the
 * HIF layer as a two-element scatter-gather list, bypassing HTC (see
 * the comment in the body for why).
 *
 * On success the msdu stays DMA-mapped and its msdu_id stays allocated
 * until the HTT tx completion arrives; on failure everything acquired
 * so far is unwound via the goto chain and the error is returned.
 */
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	/* Management frames riding the data path are still counted
	 * against the management descriptor budget when the hw has one.
	 */
	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	/* Firmware prefetches at most prefetch_len payload bytes along
	 * with the command; rounded up to a 4-byte boundary.
	 */
	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* msdu_id doubles as the index into the preallocated txbuf array. */
	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
		(sizeof(struct ath10k_htt_txbuf) * msdu_id);

	/* Reserve room for the hw-appended CCMP MIC: on protected robust
	 * management frames, and on protected raw-mode frames without
	 * hw crypto disabled.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			/* Fragment list lives in the shared frag desc
			 * bank, again indexed by msdu_id.
			 */
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			/* Fragment list lives inline in the txbuf; a
			 * zeroed second entry terminates the list.
			 */
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		/* Management frames point straight at the payload. */
		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		/* Off-channel tx: carry the target frequency instead of
		 * a peer id.
		 */
		skb_cb->htt.txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		skb_cb->htt.txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(skb_cb->htt.freq);
	} else {
		skb_cb->htt.txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	/* sg item 0: HTC + HTT command header out of the txbuf (skipping
	 * its leading frags area); sg item 1: the payload prefetch bytes.
	 */
	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}