/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 */
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static int enable_mq = 1;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
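/*
 * Each interrupt vector appears to have its own IMR register in BAR0 at
 * an 8-byte stride (hence the "intr_idx * 8" offset above); writing 0
 * unmasks the vector and writing 1 masks it.
 */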
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}
static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
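/*
 * In the three helpers above, the subqueue number is obtained by pointer
 * arithmetic: "tq - adapter->tx_queue" is the index of this queue within
 * the adapter's tx_queue[] array.
 */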
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
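/*
 * As decoded above, the GET_LINK command appears to report the link
 * state in bit 0 of the returned word and the link speed in Mbps in the
 * upper 16 bits.
 */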
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * after the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to following functions.
 */
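/*
 * Illustrative sketch (field names are hypothetical, not device ABI): if
 * a descriptor dword carries "len:14, gen:1, rsvd:17" on little endian,
 * a big-endian build declares the same fields in reverse order, so that
 * after the whole dword goes through le32_to_cpu()/cpu_to_le32() each
 * field lands on the same wire bits on either architecture.
 */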
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;

	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}
/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}

#endif  /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
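/*
 * Either way, the macros above give the rest of the driver one uniform
 * accessor style, e.g. VMXNET3_TCD_GET_GEN(&gdesc->tcd), regardless of
 * host endianness.
 */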
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
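/*
 * The queue is only woken once the ring has more free descriptors than
 * VMXNET3_WAKE_QUEUE_THRESHOLD() and the carrier is up; waking on every
 * completion could bounce the queue between stopped and started.
 */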
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);
			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
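/*
 * Worked example of the invariant asserted above: in a 4-entry ring,
 * publishing all 4 buffers would wrap next2fill around onto next2comp,
 * which is exactly what an empty ring looks like. Holding back the last
 * buffer keeps "full" and "empty" distinguishable.
 */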
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llu %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
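/*
 * Note the gen-bit discipline in vmxnet3_map_pkt(): every descriptor
 * except the SOP is written with the ring's current generation, while
 * the SOP keeps the previous generation until vmxnet3_tq_xmit() flips
 * it last, so the device never sees a partially written packet.
 */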
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
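/*
 * Copying the parsed headers into the data ring, rather than pointing a
 * descriptor at them, presumably lets the device pick them up together
 * with the descriptors; only headers that fit a data-ring entry
 * (VMXNET3_HDR_COPY_SIZE) qualify, as checked above.
 */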
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
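/*
 * This is the standard TSO preparation: the TCP checksum field is
 * seeded with the pseudo-header checksum computed over a zero length,
 * so the device can finish the real checksum for each segment it emits.
 */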
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
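/*
 * The "+ 1" reserves a descriptor for the headers copied into the data
 * ring; each linear chunk or page fragment then needs one descriptor
 * per VMXNET3_MAX_TX_BUF_SIZE (2^14 bytes) of data, which is what
 * VMXNET3_TXD_NEEDED() computes.
 */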
/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = pci_map_single(adapter->pdev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				pci_unmap_page(adapter->pdev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
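/*
 * The completion loop above is terminated by the gen bit: descriptors
 * are consumed only while their gen field matches the ring's current
 * generation, and the generation flips on every wrap, so stale entries
 * from the previous pass are never re-processed.
 */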
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}

	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							   &rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */
static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */
static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}
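/*
 * Three IRQ layouts are handled above: a dedicated MSI-X vector per tx
 * and per rx queue, VMXNET3_INTR_TXSHARE (one vector shared by all tx
 * queues), and VMXNET3_INTR_BUDDYSHARE (each tx queue sharing the
 * vector of its buddy rx queue, which is why "vector" is reset to 0
 * before the rx loop).
 */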
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				netdev_info(netdev, "failed to copy mcast list"
					    ", setting ALL_MULTI\n");
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
2070 * Set up driver_shared based on settings in adapter.
2074 vmxnet3_setup_driver_shared(struct vmxnet3_adapter
*adapter
)
2076 struct Vmxnet3_DriverShared
*shared
= adapter
->shared
;
2077 struct Vmxnet3_DSDevRead
*devRead
= &shared
->devRead
;
2078 struct Vmxnet3_TxQueueConf
*tqc
;
2079 struct Vmxnet3_RxQueueConf
*rqc
;
2082 memset(shared
, 0, sizeof(*shared
));
2084 /* driver settings */
2085 shared
->magic
= cpu_to_le32(VMXNET3_REV1_MAGIC
);
2086 devRead
->misc
.driverInfo
.version
= cpu_to_le32(
2087 VMXNET3_DRIVER_VERSION_NUM
);
2088 devRead
->misc
.driverInfo
.gos
.gosBits
= (sizeof(void *) == 4 ?
2089 VMXNET3_GOS_BITS_32
: VMXNET3_GOS_BITS_64
);
2090 devRead
->misc
.driverInfo
.gos
.gosType
= VMXNET3_GOS_TYPE_LINUX
;
2091 *((u32
*)&devRead
->misc
.driverInfo
.gos
) = cpu_to_le32(
2092 *((u32
*)&devRead
->misc
.driverInfo
.gos
));
2093 devRead
->misc
.driverInfo
.vmxnet3RevSpt
= cpu_to_le32(1);
2094 devRead
->misc
.driverInfo
.uptVerSpt
= cpu_to_le32(1);
2096 devRead
->misc
.ddPA
= cpu_to_le64(virt_to_phys(adapter
));
2097 devRead
->misc
.ddLen
= cpu_to_le32(sizeof(struct vmxnet3_adapter
));
2099 /* set up feature flags */
2100 if (adapter
->netdev
->features
& NETIF_F_RXCSUM
)
2101 devRead
->misc
.uptFeatures
|= UPT1_F_RXCSUM
;
2103 if (adapter
->netdev
->features
& NETIF_F_LRO
) {
2104 devRead
->misc
.uptFeatures
|= UPT1_F_LRO
;
2105 devRead
->misc
.maxNumRxSG
= cpu_to_le16(1 + MAX_SKB_FRAGS
);
2107 if (adapter
->netdev
->features
& NETIF_F_HW_VLAN_RX
)
2108 devRead
->misc
.uptFeatures
|= UPT1_F_RXVLAN
;
2110 devRead
->misc
.mtu
= cpu_to_le32(adapter
->netdev
->mtu
);
2111 devRead
->misc
.queueDescPA
= cpu_to_le64(adapter
->queue_desc_pa
);
2112 devRead
->misc
.queueDescLen
= cpu_to_le32(
2113 adapter
->num_tx_queues
* sizeof(struct Vmxnet3_TxQueueDesc
) +
2114 adapter
->num_rx_queues
* sizeof(struct Vmxnet3_RxQueueDesc
));
2116 /* tx queue settings */
2117 devRead
->misc
.numTxQueues
= adapter
->num_tx_queues
;
2118 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
2119 struct vmxnet3_tx_queue
*tq
= &adapter
->tx_queue
[i
];
2120 BUG_ON(adapter
->tx_queue
[i
].tx_ring
.base
== NULL
);
2121 tqc
= &adapter
->tqd_start
[i
].conf
;
2122 tqc
->txRingBasePA
= cpu_to_le64(tq
->tx_ring
.basePA
);
2123 tqc
->dataRingBasePA
= cpu_to_le64(tq
->data_ring
.basePA
);
2124 tqc
->compRingBasePA
= cpu_to_le64(tq
->comp_ring
.basePA
);
2125 tqc
->ddPA
= cpu_to_le64(virt_to_phys(tq
->buf_info
));
2126 tqc
->txRingSize
= cpu_to_le32(tq
->tx_ring
.size
);
2127 tqc
->dataRingSize
= cpu_to_le32(tq
->data_ring
.size
);
2128 tqc
->compRingSize
= cpu_to_le32(tq
->comp_ring
.size
);
2129 tqc
->ddLen
= cpu_to_le32(
2130 sizeof(struct vmxnet3_tx_buf_info
) *
2132 tqc
->intrIdx
= tq
->comp_ring
.intr_idx
;
2135 /* rx queue settings */
2136 devRead
->misc
.numRxQueues
= adapter
->num_rx_queues
;
2137 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
2138 struct vmxnet3_rx_queue
*rq
= &adapter
->rx_queue
[i
];
2139 rqc
= &adapter
->rqd_start
[i
].conf
;
2140 rqc
->rxRingBasePA
[0] = cpu_to_le64(rq
->rx_ring
[0].basePA
);
2141 rqc
->rxRingBasePA
[1] = cpu_to_le64(rq
->rx_ring
[1].basePA
);
2142 rqc
->compRingBasePA
= cpu_to_le64(rq
->comp_ring
.basePA
);
2143 rqc
->ddPA
= cpu_to_le64(virt_to_phys(
2145 rqc
->rxRingSize
[0] = cpu_to_le32(rq
->rx_ring
[0].size
);
2146 rqc
->rxRingSize
[1] = cpu_to_le32(rq
->rx_ring
[1].size
);
2147 rqc
->compRingSize
= cpu_to_le32(rq
->comp_ring
.size
);
2148 rqc
->ddLen
= cpu_to_le32(
2149 sizeof(struct vmxnet3_rx_buf_info
) *
2150 (rqc
->rxRingSize
[0] +
2151 rqc
->rxRingSize
[1]));
2152 rqc
->intrIdx
= rq
->comp_ring
.intr_idx
;
2156 memset(adapter
->rss_conf
, 0, sizeof(*adapter
->rss_conf
));
2159 struct UPT1_RSSConf
*rssConf
= adapter
->rss_conf
;
2160 static const uint8_t rss_key
[UPT1_RSS_MAX_KEY_SIZE
] = {
2161 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
2162 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
2163 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
2164 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
2165 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
2168 devRead
->misc
.uptFeatures
|= UPT1_F_RSS
;
2169 devRead
->misc
.numRxQueues
= adapter
->num_rx_queues
;
2170 rssConf
->hashType
= UPT1_RSS_HASH_TYPE_TCP_IPV4
|
2171 UPT1_RSS_HASH_TYPE_IPV4
|
2172 UPT1_RSS_HASH_TYPE_TCP_IPV6
|
2173 UPT1_RSS_HASH_TYPE_IPV6
;
2174 rssConf
->hashFunc
= UPT1_RSS_HASH_FUNC_TOEPLITZ
;
2175 rssConf
->hashKeySize
= UPT1_RSS_MAX_KEY_SIZE
;
2176 rssConf
->indTableSize
= VMXNET3_RSS_IND_TABLE_SIZE
;
2177 memcpy(rssConf
->hashKey
, rss_key
, sizeof(rss_key
));
2179 for (i
= 0; i
< rssConf
->indTableSize
; i
++)
2180 rssConf
->indTable
[i
] = ethtool_rxfh_indir_default(
2181 i
, adapter
->num_rx_queues
);
2183 devRead
->rssConfDesc
.confVer
= 1;
2184 devRead
->rssConfDesc
.confLen
= sizeof(*rssConf
);
2185 devRead
->rssConfDesc
.confPA
= virt_to_phys(rssConf
);
2188 #endif /* VMXNET3_RSS */
	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}

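/*
 * Activate the device: initialize the tx/rx queues, request IRQs, fill
 * in the shared area consumed by the device, and issue
 * VMXNET3_CMD_ACTIVATE_DEV.  On failure, everything acquired here is
 * released again before the error is returned.
 */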
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to init rx queue error %d\n", err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to setup irq for error %d\n", err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		netdev_err(adapter->netdev,
			   "Failed to activate dev: error %u\n", ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}

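/*
 * Quiesce the device unless it is already quiesced: tell the device to
 * stop, disable interrupts and NAPI, stop the tx queues, and clean up
 * the queues and IRQs.
 */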
void
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
}

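/* Program the MAC address: low four bytes in MACL, upper two in MACH. */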
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}

static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}

/* ==================== initialization and cleanup routines ============ */

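/*
 * Enable the PCI device, pick a 64-bit DMA mask when possible (32-bit
 * otherwise), claim the regions, and map BAR0 and BAR1.  The error
 * paths below unwind in reverse order.
 */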
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to request region for adapter: error %d\n", err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		dev_err(&pdev->dev, "Failed to map bar0\n");
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		dev_err(&pdev->dev, "Failed to map bar1\n");
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}

static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}

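/*
 * Recompute the rx buffer sizing for the current MTU: packets that fit
 * in one skb buffer use a single buffer per packet, larger MTUs spill
 * into extra page-sized buffers.  Ring 0 is then rounded up to a
 * multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN.
 */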
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}

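/*
 * Size and create all tx/rx queues.  A tx queue creation failure is
 * fatal; on the rx side the driver falls back to however many queues
 * were created successfully, as long as there is at least one.
 */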
static int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				netdev_err(adapter->netdev,
					   "Could not allocate any rx queues. "
					   "Aborting.\n");
				goto queue_err;
			} else {
				netdev_info(adapter->netdev,
					    "Number of rx queues changed "
					    "to : %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}

static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}

static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-create rx queues, "
				   " error %d. Closing it.\n", err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-activate, error %d. "
				   "Closing it\n", err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}

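/*
 * Declare the checksum, TSO, and VLAN offloads the device supports,
 * plus NETIF_F_HIGHDMA when a 64-bit DMA mask is in use.
 */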
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
}

static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	 could be enabled.
 *	number of vectors which can be enabled otherwise (this number is smaller
 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			dev_err(&adapter->netdev->dev,
				"Failed to enable MSI-X, error: %d\n", err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If fails to enable required number of MSI-x vectors
			 * try enabling minimum number of vectors required.
			 */
			dev_err(&adapter->netdev->dev,
				"Failed to enable %d MSI-X, trying %d instead\n",
				vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	dev_info(&adapter->pdev->dev,
		 "Number of MSI-X interrupts which can be allocated "
		 "is lower than min threshold required.\n");
	return err;
}

#endif /* CONFIG_PCI_MSI */

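/*
 * Query the device for its interrupt configuration, then try MSI-X
 * first, falling back to MSI and finally INTx.  Whenever the full
 * per-queue vector count cannot be allocated, the number of rx queues
 * is cut back to one.
 */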
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				netdev_err(adapter->netdev,
					   "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	dev_info(&adapter->netdev->dev,
		 "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}

static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}

static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	netdev_err(adapter->netdev, "tx hang\n");
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}

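/*
 * Reset worker: quiesce, reset, and re-activate the device, unless
 * another thread is already resetting it or the device has been closed
 * in the meantime.
 */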
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		netdev_notice(adapter->netdev, "resetting\n");
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		netdev_info(adapter->netdev, "already closed\n");
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

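/*
 * Probe routine: allocate the netdev and the DMA-able shared and queue
 * descriptor areas, check h/w and UPT version compatibility, pick the
 * interrupt scheme, read the MAC address, and register the netdev.
 */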
static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	dev_info(&pdev->dev,
		 "# of Tx queues : %d, # of Rx queues : %d\n",
		 num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->shared = pci_alloc_consistent(adapter->pdev,
			  sizeof(struct Vmxnet3_DriverShared),
			  &adapter->shared_pa);
	if (!adapter->shared) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
			     &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

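/*
 * Tear-down counterpart of the probe routine: unregister the netdev and
 * free the interrupt, PCI, and DMA resources in reverse order.
 */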
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}

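/*
 * Suspend handler: stop NAPI and interrupts, detach the netdev, and
 * program the wake-up filters (unicast, ARP, magic packet) selected via
 * adapter->wol before putting the device into the suspend power state.
 */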
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +		/* 2 Ethernet addresses*/
			2 * sizeof(u32);	/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}

static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
	.driver.pm	= &vmxnet3_pm_ops,
};

static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);

static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);