/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 */
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
					cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
					cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);

	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When CPU is big endian, the following routines are used to
 * correctly read and write into ABI.
 * The general technique used here is : double word bitfields are defined in
 * opposite order for big endian architecture. Then before reading them in
 * driver the complete double word is translated using le32_to_cpu. Similarly
 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into required format.
 * In order to avoid touching bits in shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}


#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */
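/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): how get_bitfield32() above reads a field out of a little-endian
 * double word on a big-endian CPU. The bit position and value below are
 * made-up examples; the real shift/size constants live in the vmxnet3
 * headers. Guarded out so it is never compiled into the driver.
 */
#if 0	/* example only */
static u32 example_read_gen_bit(void)
{
	__le32 dword = cpu_to_le32(1u << 31);	/* gen bit in the top bit */

	/* pos = 31, size = 1: mask is 1 << 31, shifted down to yield 0 or 1 */
	return get_bitfield32(&dword, 31, 1);
}
#endif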
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but dont mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
							 frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			dev_dbg(&adapter->netdev->dev,
				"txd[%u]: 0x%llu %u %u\n",
				tq->tx_ring.next2fill,
				le64_to_cpu(gdesc->txd.addr),
				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 *    Returns:
 *       -1:  error happens during parsing
 *        0:  protocol headers parsed, but too big to be copied
 *        1:  protocol headers parsed and copied
 *
 *    Other effects:
 *        1. related *ctx fields are updated.
 *        2. ctx->copy_size is # of bytes copied
 *        3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}
*skb
)
906 int count
= VMXNET3_TXD_NEEDED(skb_headlen(skb
)) + 1;
909 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
910 const struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
912 count
+= VMXNET3_TXD_NEEDED(skb_frag_size(frag
));
/*
 *    Transmits a pkt thru a given tq
 *    Returns:
 *        NETDEV_TX_OK:      descriptors are setup successfully
 *        NETDEV_TX_OK:      error occurred, the pkt is dropped
 *        NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 *    Side-effects:
 *        1. tx ring may be changed
 *        2. tq stats may be updated accordingly
 *        3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)(ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring	*ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			new_skb->dev = adapter->netdev;
			skb_reserve(new_skb, NET_IP_ALIGN);
			rbi->skb = new_skb;
			rbi->dma_addr = pci_map_single(adapter->pdev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				pci_unmap_page(adapter->pdev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					ring->next2fill);
			rq->uncommitted[ring_idx] = 0;
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
			&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}
/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocate for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}
/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI  */
/* Interrupt handler for vmxnet3  */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);

}
#endif	/* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}



		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI)
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	else
		if (!netdev_mc_empty(netdev)) {
			new_table = vmxnet3_copy_mc(netdev);
			if (new_table) {
				new_mode |= VMXNET3_RXM_MCAST;
				rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
						    new_table));
			} else {
				printk(KERN_INFO "%s: failed to copy mcast list"
				       ", setting ALL_MULTI\n", netdev->name);
				new_mode |= VMXNET3_RXM_ALL_MULTI;
			}
		}


	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
2084 * Set up driver_shared based on settings in adapter.
2088 vmxnet3_setup_driver_shared(struct vmxnet3_adapter
*adapter
)
2090 struct Vmxnet3_DriverShared
*shared
= adapter
->shared
;
2091 struct Vmxnet3_DSDevRead
*devRead
= &shared
->devRead
;
2092 struct Vmxnet3_TxQueueConf
*tqc
;
2093 struct Vmxnet3_RxQueueConf
*rqc
;
2096 memset(shared
, 0, sizeof(*shared
));
2098 /* driver settings */
2099 shared
->magic
= cpu_to_le32(VMXNET3_REV1_MAGIC
);
2100 devRead
->misc
.driverInfo
.version
= cpu_to_le32(
2101 VMXNET3_DRIVER_VERSION_NUM
);
2102 devRead
->misc
.driverInfo
.gos
.gosBits
= (sizeof(void *) == 4 ?
2103 VMXNET3_GOS_BITS_32
: VMXNET3_GOS_BITS_64
);
2104 devRead
->misc
.driverInfo
.gos
.gosType
		= VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(virt_to_phys(
							rq->buf_info));
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
	}

#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(*rssConf);
		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
	}
#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}
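

/*
 * Bring the device up: initialize the tx/rx rings, request the irqs,
 * populate the shared area and hand its physical address to the device
 * (DSAL/DSAH), then issue VMXNET3_CMD_ACTIVATE_DEV and prime the rx
 * producer registers. On failure the acquired resources are released in
 * reverse order before returning the error.
 */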
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
		       adapter->netdev->name, err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
		       adapter->netdev->name, err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
		       adapter->netdev->name, ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
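

/*
 * Counterpart of vmxnet3_activate_dev(): stop NAPI and the tx queues,
 * issue VMXNET3_CMD_QUIESCE_DEV, and release rings and irqs. The QUIESCED
 * state bit makes it safe to call this more than once.
 */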
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}


static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	/* low 4 bytes go to MACL, high 2 bytes to MACH */
	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}


/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
		       pci_name(pdev), err);
		return err;
	}

	/* prefer a 64-bit DMA mask, fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed "
			       "for adapter %s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask failed for adapter "
			       "%s\n", pci_name(pdev));
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		printk(KERN_ERR "Failed to request region for adapter %s: "
		       "error %d\n", pci_name(pdev), err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}


static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}
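

/*
 * Derive skb_buf_size and rx_buf_per_pkt from the current MTU, then round
 * the ring0 size up so that a packet always occupies a whole number of
 * descriptors: e.g. with rx_buf_per_pkt == 2, ring0 becomes a multiple of
 * 2 * VMXNET3_RING_SIZE_ALIGN. All rx queues get the same ring sizes.
 */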
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
		     VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}
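

/*
 * Create the tx and rx queues with the requested ring sizes. A tx queue
 * creation failure is fatal, while rx queue creation may fall back to
 * fewer queues than requested (see the error handling below).
 */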
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot do away with
		 * lesser number of queues than what we asked for
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				printk(KERN_ERR "Could not allocate any rx"
				       "queues. Aborting.\n");
				goto queue_err;
			}
			printk(KERN_INFO "Number of rx queues changed "
			       "to : %d.\n", i);
			adapter->num_rx_queues = i;
			err = 0;
			break;
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}
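

/*
 * ndo_open handler: create the queues with the default ring sizes and
 * activate the device.
 */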
static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}


static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}
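

/*
 * Close the device from a context that already owns the RESETTING bit
 * (e.g. a failed MTU change). dev_close() ends up in vmxnet3_close(), so
 * NAPI must be re-enabled first to avoid a deadlock.
 */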
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}


static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-create rx queues,"
			       " error %d. Closing it.\n", netdev->name, err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			printk(KERN_ERR "%s: failed to re-activate, error %d. "
			       "Closing it\n", netdev->name, err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}


static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
		NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;

	netdev_info(adapter->netdev,
		    "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
		    dma64 ? " highDMA" : "");
}


static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}


#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	 could be enabled,
 *	number of vectors which can be enabled otherwise (this number is smaller
 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
 */
static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			netdev_err(adapter->netdev,
				   "Failed to enable MSI-X, error: %d\n", err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If fails to enable required number of MSI-x vectors
			 * try enabling minimum number of vectors required.
			 */
			netdev_err(adapter->netdev,
				   "Failed to enable %d MSI-X, trying %d instead\n",
				   vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	netdev_info(adapter->netdev,
		    "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n");
	return err;
}

#endif /* CONFIG_PCI_MSI */
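

/*
 * Read the device's preferred interrupt type and mask mode, then try
 * MSI-X, MSI and INTx in that order. In VMXNET3_INTR_BUDDYSHARE mode each
 * rx queue shares the vector of its tx buddy, so the MSI-X request is
 * num_tx_queues + 1, the extra vector being for link events: e.g. 4 tx
 * and 4 rx queues request 5 vectors, while VMXNET3_INTR_DONTSHARE would
 * request 4 + 4 + 1 = 9. If the full request cannot be granted, the
 * driver falls back to one rx queue, then to MSI, then to INTx.
 */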
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO) {
		adapter->intr.type = VMXNET3_IT_MSIX;
	}

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues : 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		netdev_info(adapter->netdev,
			    "Failed to enable MSI-X, error %d . Limiting #rx queues to 1, try MSI.\n",
			    err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}


static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
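

/*
 * Watchdog callback: record the hang and defer the actual recovery to
 * vmxnet3_reset_work() below.
 */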
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	adapter->tx_timeout_count++;

	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
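

/*
 * PCI probe: allocate the netdev plus the DMA-able shared and queue
 * descriptor areas, verify hardware/UPT version compatibility, pick the
 * interrupt scheme, and register the netdev. The err_* labels unwind the
 * allocations in reverse order.
 */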
static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* stupid gcc */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
	       num_tx_queues, num_rx_queues);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->shared = pci_alloc_consistent(adapter->pdev,
					       sizeof(struct Vmxnet3_DriverShared),
					       &adapter->shared_pa);
	if (!adapter->shared) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
						  &adapter->queue_desc_pa);

	if (!adapter->tqd_start) {
		printk(KERN_ERR "Failed to allocate memory for %s\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
		       " %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		printk(KERN_ERR "Incompatible upt version (0x%x) for "
		       "adapter %s\n", ver, pci_name(pdev));
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	adapter->dev_number = atomic_read(&devices_found);

	adapter->share_intr = irq_share_mode;
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
	    adapter->num_tx_queues != adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		printk(KERN_INFO "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR "Failed to register adapter %s\n",
		       pci_name(pdev));
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	atomic_inc(&devices_found);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
err_alloc_rss:
#endif
	kfree(adapter->pm_conf);
err_alloc_pm:
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
err_alloc_queue_desc:
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
err_alloc_shared:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
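

/*
 * PCI remove. Note that the queue descriptor area is freed using a
 * recomputed num_rx_queues (mirroring the computation in probe) rather
 * than adapter->num_rx_queues, which may have been lowered when interrupt
 * allocation fell back to a single rx queue.
 */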
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	kfree(adapter->rss_conf);
#endif
	kfree(adapter->pm_conf);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
			    adapter->queue_desc_pa);
	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
			    adapter->shared, adapter->shared_pa);
	free_netdev(netdev);
}


#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses*/
			2 * sizeof(u32);		/*2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}
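

/*
 * Resume path: clear the wake-up filter configuration, restore PCI state,
 * then re-allocate interrupt resources and re-enable NAPI and interrupts;
 * the inverse of vmxnet3_suspend() above.
 */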
static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
								 pmConf));

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}


static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif /* CONFIG_PM */


static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


static int __init
vmxnet3_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
	       VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);


MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);