/*
 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");
static bool rx_align_2;
module_param(rx_align_2, bool, S_IRUGO);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}
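/* With rx_align_2 set, Rx buffers are posted at 4*n+2 addresses and each
 * received frame carries an extra 6-byte SNAP field between the MAC
 * addresses and the EtherType; wil_vring_reap_rx() strips it (see the
 * "Packet layout" diagram there). With the default setting, snaplen is 0
 * and frames arrive as plain Ethernet.
 */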
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}
static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}
static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}
static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}
/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}
/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}
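/* One slot is kept in reserve: if all vring->size slots could be used,
 * swhead == swtail would be ambiguous between "empty" and "full"
 * (cf. wil_vring_is_empty() and wil_vring_is_full() above). E.g. for
 * size = 128, avail never exceeds 127.
 */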
/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size/8;
}
/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size/4;
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}
	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);
	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}
	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;
	skb_reserve(skb, headroom);
	skb_put(skb, sz);
	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
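/* Buffer sizing (illustrative): sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
 * e.g. a hypothetical mtu_max of 2242 with rx_align_2 set gives
 * 2242 + 14 + 6 = 2262 bytes per Rx buffer. wil_vring_reap_rx() later
 * rejects any descriptor whose dma.length exceeds this sz.
 */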
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;
	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}
	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}
	rtap_vendor = (void *)skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);
	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = mtu_max + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i = (int)vring->swhead;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}
	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		return NULL;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);
	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);
	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;
	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}
	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW doesn't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}
	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
/**
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;
	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}
	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

	return rc;
}
/**
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};
	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}
	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}
/**
 * Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "%s()\n", __func__);
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "%s()\n", __func__);

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 0,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;
	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;
	if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}
*wil
, int id
, int size
)
755 struct wmi_bcast_vring_cfg_cmd cmd
= {
756 .action
= cpu_to_le32(WMI_VRING_CMD_ADD
),
760 cpu_to_le16(wil_mtu2macbuf(mtu_max
)),
761 .ring_size
= cpu_to_le16(size
),
764 .encap_trans_type
= WMI_VRING_ENC_TYPE_802_3
,
768 struct wil6210_mbox_hdr_wmi wmi
;
769 struct wmi_vring_cfg_done_event cmd
;
771 struct vring
*vring
= &wil
->vring_tx
[id
];
772 struct vring_tx_data
*txdata
= &wil
->vring_tx_data
[id
];
	wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	memset(txdata, 0, sizeof(*txdata));
	spin_lock_init(&txdata->lock);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	txdata->enabled = 1;

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:

	return rc;
}
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	WARN_ON(!mutex_is_locked(&wil->mutex));

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
	memset(txdata, 0, sizeof(*txdata));
}
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	if (!wil->sta[cid].data_port_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];

			wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
				     __func__, eth->h_dest, i);
			if (v->va) {
				return v;
			} else {
				wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
				return NULL;
			}
		}
	}

	return NULL;
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);
static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	int cid;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring and see whether it is eligible for data
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->sta[cid].data_port_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			break;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for secure link
 */
*wil_find_tx_bcast_1(struct wil6210_priv
*wil
,
927 int i
= wil
->bcast_vring
;
931 v
= &wil
->vring_tx
[i
];
static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	int cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		if (!v->va)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->sta[cid].data_port_open)
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);
	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->sta[cid].data_port_open)
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
*wil_find_tx_bcast(struct wil6210_priv
*wil
,
1012 struct sk_buff
*skb
)
1014 struct wireless_dev
*wdev
= wil
->wdev
;
1016 if (wdev
->iftype
!= NL80211_IFTYPE_AP
)
1017 return wil_find_tx_bcast_2(wil
, skb
);
1020 return wil_find_tx_bcast_2(wil
, skb
);
1022 return wil_find_tx_bcast_1(wil
, skb
);
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= ((nr_frags + 1) <<
		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
					 struct vring_tx_desc *d,
					 struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
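/* Offload example (illustrative): a CHECKSUM_PARTIAL TCP/IPv4 skb with no
 * IP options ends up with b11 = ETH_HLEN plus the IPv4 L3 type bit,
 * ip_length = 20, L4 type 2 (TCP) with L4 length = tcp_hdrlen(skb), and
 * both the checksum-enable and pseudo-header-calc bits set in d0.
 */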
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);
	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
			/* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
			/* packet mode 2 */
			d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
				       (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
		}
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}
->ctx
[i
].nr_frags
= nr_frags
;
1163 wil_tx_desc_set_nr_frags(d
, nr_frags
);
	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_cksum_set(wil, d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);
	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);
	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}
	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);
	rc = __wil_tx_vring(wil, vring, skb);
	spin_unlock(&txdata->lock);

	return rc;
}
wil_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1258 struct wil6210_priv
*wil
= ndev_to_wil(ndev
);
1259 struct ethhdr
*eth
= (void *)skb
->data
;
1260 bool bcast
= is_multicast_ether_addr(eth
->h_dest
);
1261 struct vring
*vring
;
1262 static bool pr_once_fw
;
1265 wil_dbg_txrx(wil
, "%s()\n", __func__
);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;
->wdev
->iftype
== NL80211_IFTYPE_STATION
) {
1285 /* in STA mode (ESS), all to same VRING */
1286 vring
= wil_find_tx_vring_sta(wil
, skb
);
1287 } else { /* direct communication, find matching VRING */
1288 vring
= bcast
? wil_find_tx_bcast(wil
, skb
) :
1289 wil_find_tx_ucast(wil
, skb
);
1291 if (unlikely(!vring
)) {
1292 wil_dbg_txrx(wil
, "No Tx VRING found for %pM\n", eth
->h_dest
);
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	/* do we still have enough room in the vring? */
	if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
		netif_tx_stop_all_queues(wil_to_ndev(wil));
		wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
	}

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;
	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;
= (lf
+ 1) % vring
->size
;
1389 while (vring
->swtail
!= new_swtail
) {
1390 struct vring_tx_desc dd
, *d
= &dd
;
1392 struct sk_buff
*skb
;
1394 ctx
= &vring
->ctx
[vring
->swtail
];
1396 _d
= &vring
->va
[vring
->swtail
].tx
;
1400 dmalen
= le16_to_cpu(d
->dma
.length
);
1401 trace_wil6210_tx_done(ringid
, vring
->swtail
, dmalen
,
1404 "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
1405 ringid
, vring
->swtail
, dmalen
,
1406 d
->dma
.status
, d
->dma
.error
);
1407 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE
, 32, 4,
1408 (const void *)d
, sizeof(*d
), false);
			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}
	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}
) > wil_vring_wmark_high(vring
)) {
1448 wil_dbg_txrx(wil
, "netif_tx_wake : ring not full\n");
1449 netif_tx_wake_all_queues(wil_to_ndev(wil
));