1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
28 #include "i40e_prototype.h"
29 static int i40evf_setup_all_tx_resources(struct i40evf_adapter
*adapter
);
30 static int i40evf_setup_all_rx_resources(struct i40evf_adapter
*adapter
);
31 static int i40evf_close(struct net_device
*netdev
);
33 char i40evf_driver_name
[] = "i40evf";
34 static const char i40evf_driver_string
[] =
35 "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
39 #define DRV_VERSION_MAJOR 1
40 #define DRV_VERSION_MINOR 5
41 #define DRV_VERSION_BUILD 10
42 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) \
46 const char i40evf_driver_version
[] = DRV_VERSION
;
47 static const char i40evf_copyright
[] =
48 "Copyright (c) 2013 - 2015 Intel Corporation.";
50 /* i40evf_pci_tbl - PCI Device ID Table
52 * Wildcard entries (PCI_ANY_ID) should come last
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
56 * Class, Class Mask, private data (not used) }
58 static const struct pci_device_id i40evf_pci_tbl
[] = {
59 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_VF
), 0},
60 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_X722_VF
), 0},
61 /* required last entry */
65 MODULE_DEVICE_TABLE(pci
, i40evf_pci_tbl
);
67 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
68 MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_VERSION
);
72 static struct workqueue_struct
*i40evf_wq
;
75 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
76 * @hw: pointer to the HW structure
77 * @mem: ptr to mem struct to fill out
78 * @size: size of memory requested
79 * @alignment: what to align the allocation to
81 i40e_status
i40evf_allocate_dma_mem_d(struct i40e_hw
*hw
,
82 struct i40e_dma_mem
*mem
,
83 u64 size
, u32 alignment
)
85 struct i40evf_adapter
*adapter
= (struct i40evf_adapter
*)hw
->back
;
88 return I40E_ERR_PARAM
;
90 mem
->size
= ALIGN(size
, alignment
);
91 mem
->va
= dma_alloc_coherent(&adapter
->pdev
->dev
, mem
->size
,
92 (dma_addr_t
*)&mem
->pa
, GFP_KERNEL
);
96 return I40E_ERR_NO_MEMORY
;
100 * i40evf_free_dma_mem_d - OS specific memory free for shared code
101 * @hw: pointer to the HW structure
102 * @mem: ptr to mem struct to free
104 i40e_status
i40evf_free_dma_mem_d(struct i40e_hw
*hw
, struct i40e_dma_mem
*mem
)
106 struct i40evf_adapter
*adapter
= (struct i40evf_adapter
*)hw
->back
;
108 if (!mem
|| !mem
->va
)
109 return I40E_ERR_PARAM
;
110 dma_free_coherent(&adapter
->pdev
->dev
, mem
->size
,
111 mem
->va
, (dma_addr_t
)mem
->pa
);
116 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
117 * @hw: pointer to the HW structure
118 * @mem: ptr to mem struct to fill out
119 * @size: size of memory requested
121 i40e_status
i40evf_allocate_virt_mem_d(struct i40e_hw
*hw
,
122 struct i40e_virt_mem
*mem
, u32 size
)
125 return I40E_ERR_PARAM
;
128 mem
->va
= kzalloc(size
, GFP_KERNEL
);
133 return I40E_ERR_NO_MEMORY
;
137 * i40evf_free_virt_mem_d - OS specific memory free for shared code
138 * @hw: pointer to the HW structure
139 * @mem: ptr to mem struct to free
141 i40e_status
i40evf_free_virt_mem_d(struct i40e_hw
*hw
,
142 struct i40e_virt_mem
*mem
)
145 return I40E_ERR_PARAM
;
147 /* it's ok to kfree a NULL pointer */
154 * i40evf_debug_d - OS dependent version of debug printing
155 * @hw: pointer to the HW structure
156 * @mask: debug level mask
157 * @fmt_str: printf-type format description
159 void i40evf_debug_d(void *hw
, u32 mask
, char *fmt_str
, ...)
164 if (!(mask
& ((struct i40e_hw
*)hw
)->debug_mask
))
167 va_start(argptr
, fmt_str
);
168 vsnprintf(buf
, sizeof(buf
), fmt_str
, argptr
);
171 /* the debug string is already formatted with a newline */
176 * i40evf_schedule_reset - Set the flags and schedule a reset event
177 * @adapter: board private structure
179 void i40evf_schedule_reset(struct i40evf_adapter
*adapter
)
181 if (!(adapter
->flags
&
182 (I40EVF_FLAG_RESET_PENDING
| I40EVF_FLAG_RESET_NEEDED
))) {
183 adapter
->flags
|= I40EVF_FLAG_RESET_NEEDED
;
184 schedule_work(&adapter
->reset_task
);
189 * i40evf_tx_timeout - Respond to a Tx Hang
190 * @netdev: network interface device structure
192 static void i40evf_tx_timeout(struct net_device
*netdev
)
194 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
196 adapter
->tx_timeout_count
++;
197 i40evf_schedule_reset(adapter
);
201 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
202 * @adapter: board private structure
204 static void i40evf_misc_irq_disable(struct i40evf_adapter
*adapter
)
206 struct i40e_hw
*hw
= &adapter
->hw
;
208 wr32(hw
, I40E_VFINT_DYN_CTL01
, 0);
211 rd32(hw
, I40E_VFGEN_RSTAT
);
213 synchronize_irq(adapter
->msix_entries
[0].vector
);
217 * i40evf_misc_irq_enable - Enable default interrupt generation settings
218 * @adapter: board private structure
220 static void i40evf_misc_irq_enable(struct i40evf_adapter
*adapter
)
222 struct i40e_hw
*hw
= &adapter
->hw
;
224 wr32(hw
, I40E_VFINT_DYN_CTL01
, I40E_VFINT_DYN_CTL01_INTENA_MASK
|
225 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
226 wr32(hw
, I40E_VFINT_ICR0_ENA1
, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK
);
229 rd32(hw
, I40E_VFGEN_RSTAT
);
233 * i40evf_irq_disable - Mask off interrupt generation on the NIC
234 * @adapter: board private structure
236 static void i40evf_irq_disable(struct i40evf_adapter
*adapter
)
239 struct i40e_hw
*hw
= &adapter
->hw
;
241 if (!adapter
->msix_entries
)
244 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
245 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1), 0);
246 synchronize_irq(adapter
->msix_entries
[i
].vector
);
249 rd32(hw
, I40E_VFGEN_RSTAT
);
253 * i40evf_irq_enable_queues - Enable interrupt for specified queues
254 * @adapter: board private structure
255 * @mask: bitmap of queues to enable
257 void i40evf_irq_enable_queues(struct i40evf_adapter
*adapter
, u32 mask
)
259 struct i40e_hw
*hw
= &adapter
->hw
;
262 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
263 if (mask
& BIT(i
- 1)) {
264 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1),
265 I40E_VFINT_DYN_CTLN1_INTENA_MASK
|
266 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
267 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
);
273 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
274 * @adapter: board private structure
275 * @mask: bitmap of vectors to trigger
277 static void i40evf_fire_sw_int(struct i40evf_adapter
*adapter
, u32 mask
)
279 struct i40e_hw
*hw
= &adapter
->hw
;
284 dyn_ctl
= rd32(hw
, I40E_VFINT_DYN_CTL01
);
285 dyn_ctl
|= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK
|
286 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
287 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
;
288 wr32(hw
, I40E_VFINT_DYN_CTL01
, dyn_ctl
);
290 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
292 dyn_ctl
= rd32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1));
293 dyn_ctl
|= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK
|
294 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
295 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
;
296 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1), dyn_ctl
);
302 * i40evf_irq_enable - Enable default interrupt generation settings
303 * @adapter: board private structure
304 * @flush: boolean value whether to run rd32()
306 void i40evf_irq_enable(struct i40evf_adapter
*adapter
, bool flush
)
308 struct i40e_hw
*hw
= &adapter
->hw
;
310 i40evf_misc_irq_enable(adapter
);
311 i40evf_irq_enable_queues(adapter
, ~0);
314 rd32(hw
, I40E_VFGEN_RSTAT
);
318 * i40evf_msix_aq - Interrupt handler for vector 0
319 * @irq: interrupt number
320 * @data: pointer to netdev
322 static irqreturn_t
i40evf_msix_aq(int irq
, void *data
)
324 struct net_device
*netdev
= data
;
325 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
326 struct i40e_hw
*hw
= &adapter
->hw
;
329 /* handle non-queue interrupts, these reads clear the registers */
330 val
= rd32(hw
, I40E_VFINT_ICR01
);
331 val
= rd32(hw
, I40E_VFINT_ICR0_ENA1
);
333 val
= rd32(hw
, I40E_VFINT_DYN_CTL01
) |
334 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK
;
335 wr32(hw
, I40E_VFINT_DYN_CTL01
, val
);
337 /* schedule work on the private workqueue */
338 schedule_work(&adapter
->adminq_task
);
344 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
345 * @irq: interrupt number
346 * @data: pointer to a q_vector
348 static irqreturn_t
i40evf_msix_clean_rings(int irq
, void *data
)
350 struct i40e_q_vector
*q_vector
= data
;
352 if (!q_vector
->tx
.ring
&& !q_vector
->rx
.ring
)
355 napi_schedule_irqoff(&q_vector
->napi
);
361 * i40evf_map_vector_to_rxq - associate irqs with rx queues
362 * @adapter: board private structure
363 * @v_idx: interrupt number
364 * @r_idx: queue number
367 i40evf_map_vector_to_rxq(struct i40evf_adapter
*adapter
, int v_idx
, int r_idx
)
369 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[v_idx
];
370 struct i40e_ring
*rx_ring
= &adapter
->rx_rings
[r_idx
];
372 rx_ring
->q_vector
= q_vector
;
373 rx_ring
->next
= q_vector
->rx
.ring
;
374 rx_ring
->vsi
= &adapter
->vsi
;
375 q_vector
->rx
.ring
= rx_ring
;
376 q_vector
->rx
.count
++;
377 q_vector
->rx
.latency_range
= I40E_LOW_LATENCY
;
378 q_vector
->itr_countdown
= ITR_COUNTDOWN_START
;
382 * i40evf_map_vector_to_txq - associate irqs with tx queues
383 * @adapter: board private structure
384 * @v_idx: interrupt number
385 * @t_idx: queue number
388 i40evf_map_vector_to_txq(struct i40evf_adapter
*adapter
, int v_idx
, int t_idx
)
390 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[v_idx
];
391 struct i40e_ring
*tx_ring
= &adapter
->tx_rings
[t_idx
];
393 tx_ring
->q_vector
= q_vector
;
394 tx_ring
->next
= q_vector
->tx
.ring
;
395 tx_ring
->vsi
= &adapter
->vsi
;
396 q_vector
->tx
.ring
= tx_ring
;
397 q_vector
->tx
.count
++;
398 q_vector
->tx
.latency_range
= I40E_LOW_LATENCY
;
399 q_vector
->itr_countdown
= ITR_COUNTDOWN_START
;
400 q_vector
->num_ringpairs
++;
401 q_vector
->ring_mask
|= BIT(t_idx
);
405 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
406 * @adapter: board private structure to initialize
408 * This function maps descriptor rings to the queue-specific vectors
409 * we were allotted through the MSI-X enabling code. Ideally, we'd have
410 * one vector per ring/queue, but on a constrained vector budget, we
411 * group the rings as "efficiently" as possible. You would add new
412 * mapping configurations in here.
414 static int i40evf_map_rings_to_vectors(struct i40evf_adapter
*adapter
)
418 int rxr_idx
= 0, txr_idx
= 0;
419 int rxr_remaining
= adapter
->num_active_queues
;
420 int txr_remaining
= adapter
->num_active_queues
;
425 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
427 /* The ideal configuration...
428 * We have enough vectors to map one per queue.
430 if (q_vectors
>= (rxr_remaining
* 2)) {
431 for (; rxr_idx
< rxr_remaining
; v_start
++, rxr_idx
++)
432 i40evf_map_vector_to_rxq(adapter
, v_start
, rxr_idx
);
434 for (; txr_idx
< txr_remaining
; v_start
++, txr_idx
++)
435 i40evf_map_vector_to_txq(adapter
, v_start
, txr_idx
);
439 /* If we don't have enough vectors for a 1-to-1
440 * mapping, we'll have to group them so there are
441 * multiple queues per vector.
442 * Re-adjusting *qpv takes care of the remainder.
444 for (i
= v_start
; i
< q_vectors
; i
++) {
445 rqpv
= DIV_ROUND_UP(rxr_remaining
, q_vectors
- i
);
446 for (j
= 0; j
< rqpv
; j
++) {
447 i40evf_map_vector_to_rxq(adapter
, i
, rxr_idx
);
452 for (i
= v_start
; i
< q_vectors
; i
++) {
453 tqpv
= DIV_ROUND_UP(txr_remaining
, q_vectors
- i
);
454 for (j
= 0; j
< tqpv
; j
++) {
455 i40evf_map_vector_to_txq(adapter
, i
, txr_idx
);
462 adapter
->aq_required
|= I40EVF_FLAG_AQ_MAP_VECTORS
;
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
491 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
492 * @adapter: board private structure
494 * Allocates MSI-X vectors for tx and rx handling, and requests
495 * interrupts from the kernel.
498 i40evf_request_traffic_irqs(struct i40evf_adapter
*adapter
, char *basename
)
500 int vector
, err
, q_vectors
;
501 int rx_int_idx
= 0, tx_int_idx
= 0;
503 i40evf_irq_disable(adapter
);
504 /* Decrement for Other and TCP Timer vectors */
505 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
507 for (vector
= 0; vector
< q_vectors
; vector
++) {
508 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[vector
];
510 if (q_vector
->tx
.ring
&& q_vector
->rx
.ring
) {
511 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
512 "i40evf-%s-%s-%d", basename
,
513 "TxRx", rx_int_idx
++);
515 } else if (q_vector
->rx
.ring
) {
516 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
517 "i40evf-%s-%s-%d", basename
,
519 } else if (q_vector
->tx
.ring
) {
520 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
521 "i40evf-%s-%s-%d", basename
,
524 /* skip this unused q_vector */
528 adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
,
529 i40evf_msix_clean_rings
,
534 dev_info(&adapter
->pdev
->dev
,
535 "Request_irq failed, error: %d\n", err
);
536 goto free_queue_irqs
;
538 /* assign the mask for this irq */
539 irq_set_affinity_hint(
540 adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
,
541 q_vector
->affinity_mask
);
549 irq_set_affinity_hint(
550 adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
,
552 free_irq(adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
,
553 &adapter
->q_vectors
[vector
]);
559 * i40evf_request_misc_irq - Initialize MSI-X interrupts
560 * @adapter: board private structure
562 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
563 * vector is only for the admin queue, and stays active even when the netdev
566 static int i40evf_request_misc_irq(struct i40evf_adapter
*adapter
)
568 struct net_device
*netdev
= adapter
->netdev
;
571 snprintf(adapter
->misc_vector_name
,
572 sizeof(adapter
->misc_vector_name
) - 1, "i40evf-%s:mbx",
573 dev_name(&adapter
->pdev
->dev
));
574 err
= request_irq(adapter
->msix_entries
[0].vector
,
576 adapter
->misc_vector_name
, netdev
);
578 dev_err(&adapter
->pdev
->dev
,
579 "request_irq for %s failed: %d\n",
580 adapter
->misc_vector_name
, err
);
581 free_irq(adapter
->msix_entries
[0].vector
, netdev
);
587 * i40evf_free_traffic_irqs - Free MSI-X interrupts
588 * @adapter: board private structure
590 * Frees all MSI-X vectors other than 0.
592 static void i40evf_free_traffic_irqs(struct i40evf_adapter
*adapter
)
597 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
599 for (i
= 0; i
< q_vectors
; i
++) {
600 irq_set_affinity_hint(adapter
->msix_entries
[i
+1].vector
,
602 free_irq(adapter
->msix_entries
[i
+1].vector
,
603 &adapter
->q_vectors
[i
]);
608 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
609 * @adapter: board private structure
611 * Frees MSI-X vector 0.
613 static void i40evf_free_misc_irq(struct i40evf_adapter
*adapter
)
615 struct net_device
*netdev
= adapter
->netdev
;
617 free_irq(adapter
->msix_entries
[0].vector
, netdev
);
621 * i40evf_configure_tx - Configure Transmit Unit after Reset
622 * @adapter: board private structure
624 * Configure the Tx unit of the MAC after a reset.
626 static void i40evf_configure_tx(struct i40evf_adapter
*adapter
)
628 struct i40e_hw
*hw
= &adapter
->hw
;
631 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
632 adapter
->tx_rings
[i
].tail
= hw
->hw_addr
+ I40E_QTX_TAIL1(i
);
636 * i40evf_configure_rx - Configure Receive Unit after Reset
637 * @adapter: board private structure
639 * Configure the Rx unit of the MAC after a reset.
641 static void i40evf_configure_rx(struct i40evf_adapter
*adapter
)
643 struct i40e_hw
*hw
= &adapter
->hw
;
644 struct net_device
*netdev
= adapter
->netdev
;
645 int max_frame
= netdev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
650 /* Set the RX buffer length according to the mode */
651 if (adapter
->flags
& I40EVF_FLAG_RX_PS_ENABLED
||
652 netdev
->mtu
<= ETH_DATA_LEN
)
653 rx_buf_len
= I40EVF_RXBUFFER_2048
;
655 rx_buf_len
= ALIGN(max_frame
, 1024);
657 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
658 adapter
->rx_rings
[i
].tail
= hw
->hw_addr
+ I40E_QRX_TAIL1(i
);
659 adapter
->rx_rings
[i
].rx_buf_len
= rx_buf_len
;
660 if (adapter
->flags
& I40EVF_FLAG_RX_PS_ENABLED
) {
661 set_ring_ps_enabled(&adapter
->rx_rings
[i
]);
662 adapter
->rx_rings
[i
].rx_hdr_len
= I40E_RX_HDR_SIZE
;
664 clear_ring_ps_enabled(&adapter
->rx_rings
[i
]);
670 * i40evf_find_vlan - Search filter list for specific vlan filter
671 * @adapter: board private structure
674 * Returns ptr to the filter object or NULL
677 i40evf_vlan_filter
*i40evf_find_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
679 struct i40evf_vlan_filter
*f
;
681 list_for_each_entry(f
, &adapter
->vlan_filter_list
, list
) {
689 * i40evf_add_vlan - Add a vlan filter to the list
690 * @adapter: board private structure
693 * Returns ptr to the filter object or NULL when no memory available.
696 i40evf_vlan_filter
*i40evf_add_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
698 struct i40evf_vlan_filter
*f
= NULL
;
701 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
702 &adapter
->crit_section
)) {
708 f
= i40evf_find_vlan(adapter
, vlan
);
710 f
= kzalloc(sizeof(*f
), GFP_ATOMIC
);
716 INIT_LIST_HEAD(&f
->list
);
717 list_add(&f
->list
, &adapter
->vlan_filter_list
);
719 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_VLAN_FILTER
;
723 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
729 * i40evf_del_vlan - Remove a vlan filter from the list
730 * @adapter: board private structure
733 static void i40evf_del_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
735 struct i40evf_vlan_filter
*f
;
738 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
739 &adapter
->crit_section
)) {
745 f
= i40evf_find_vlan(adapter
, vlan
);
748 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_VLAN_FILTER
;
750 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
754 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
755 * @netdev: network device struct
758 static int i40evf_vlan_rx_add_vid(struct net_device
*netdev
,
759 __always_unused __be16 proto
, u16 vid
)
761 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
763 if (!VLAN_ALLOWED(adapter
))
765 if (i40evf_add_vlan(adapter
, vid
) == NULL
)
771 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
772 * @netdev: network device struct
775 static int i40evf_vlan_rx_kill_vid(struct net_device
*netdev
,
776 __always_unused __be16 proto
, u16 vid
)
778 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
780 if (VLAN_ALLOWED(adapter
)) {
781 i40evf_del_vlan(adapter
, vid
);
788 * i40evf_find_filter - Search filter list for specific mac filter
789 * @adapter: board private structure
790 * @macaddr: the MAC address
792 * Returns ptr to the filter object or NULL
795 i40evf_mac_filter
*i40evf_find_filter(struct i40evf_adapter
*adapter
,
798 struct i40evf_mac_filter
*f
;
803 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
804 if (ether_addr_equal(macaddr
, f
->macaddr
))
811 * i40e_add_filter - Add a mac filter to the filter list
812 * @adapter: board private structure
813 * @macaddr: the MAC address
815 * Returns ptr to the filter object or NULL when no memory available.
818 i40evf_mac_filter
*i40evf_add_filter(struct i40evf_adapter
*adapter
,
821 struct i40evf_mac_filter
*f
;
827 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
828 &adapter
->crit_section
)) {
834 f
= i40evf_find_filter(adapter
, macaddr
);
836 f
= kzalloc(sizeof(*f
), GFP_ATOMIC
);
838 clear_bit(__I40EVF_IN_CRITICAL_TASK
,
839 &adapter
->crit_section
);
843 ether_addr_copy(f
->macaddr
, macaddr
);
845 list_add(&f
->list
, &adapter
->mac_filter_list
);
847 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_MAC_FILTER
;
850 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
855 * i40evf_set_mac - NDO callback to set port mac address
856 * @netdev: network interface device structure
857 * @p: pointer to an address structure
859 * Returns 0 on success, negative on failure
861 static int i40evf_set_mac(struct net_device
*netdev
, void *p
)
863 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
864 struct i40e_hw
*hw
= &adapter
->hw
;
865 struct i40evf_mac_filter
*f
;
866 struct sockaddr
*addr
= p
;
868 if (!is_valid_ether_addr(addr
->sa_data
))
869 return -EADDRNOTAVAIL
;
871 if (ether_addr_equal(netdev
->dev_addr
, addr
->sa_data
))
874 if (adapter
->flags
& I40EVF_FLAG_ADDR_SET_BY_PF
)
877 f
= i40evf_find_filter(adapter
, hw
->mac
.addr
);
880 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
883 f
= i40evf_add_filter(adapter
, addr
->sa_data
);
885 ether_addr_copy(hw
->mac
.addr
, addr
->sa_data
);
886 ether_addr_copy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
);
889 return (f
== NULL
) ? -ENOMEM
: 0;
893 * i40evf_set_rx_mode - NDO callback to set the netdev filters
894 * @netdev: network interface device structure
896 static void i40evf_set_rx_mode(struct net_device
*netdev
)
898 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
899 struct i40evf_mac_filter
*f
, *ftmp
;
900 struct netdev_hw_addr
*uca
;
901 struct netdev_hw_addr
*mca
;
902 struct netdev_hw_addr
*ha
;
905 /* add addr if not already in the filter list */
906 netdev_for_each_uc_addr(uca
, netdev
) {
907 i40evf_add_filter(adapter
, uca
->addr
);
909 netdev_for_each_mc_addr(mca
, netdev
) {
910 i40evf_add_filter(adapter
, mca
->addr
);
913 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
914 &adapter
->crit_section
)) {
917 dev_err(&adapter
->pdev
->dev
,
918 "Failed to get lock in %s\n", __func__
);
922 /* remove filter if not in netdev list */
923 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
, list
) {
924 netdev_for_each_mc_addr(mca
, netdev
)
925 if (ether_addr_equal(mca
->addr
, f
->macaddr
))
926 goto bottom_of_search_loop
;
928 netdev_for_each_uc_addr(uca
, netdev
)
929 if (ether_addr_equal(uca
->addr
, f
->macaddr
))
930 goto bottom_of_search_loop
;
932 for_each_dev_addr(netdev
, ha
)
933 if (ether_addr_equal(ha
->addr
, f
->macaddr
))
934 goto bottom_of_search_loop
;
936 if (ether_addr_equal(f
->macaddr
, adapter
->hw
.mac
.addr
))
937 goto bottom_of_search_loop
;
939 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
941 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
943 bottom_of_search_loop
:
947 if (netdev
->flags
& IFF_PROMISC
&&
948 !(adapter
->flags
& I40EVF_FLAG_PROMISC_ON
))
949 adapter
->aq_required
|= I40EVF_FLAG_AQ_REQUEST_PROMISC
;
950 else if (!(netdev
->flags
& IFF_PROMISC
) &&
951 adapter
->flags
& I40EVF_FLAG_PROMISC_ON
)
952 adapter
->aq_required
|= I40EVF_FLAG_AQ_RELEASE_PROMISC
;
954 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
958 * i40evf_napi_enable_all - enable NAPI on all queue vectors
959 * @adapter: board private structure
961 static void i40evf_napi_enable_all(struct i40evf_adapter
*adapter
)
964 struct i40e_q_vector
*q_vector
;
965 int q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
967 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
968 struct napi_struct
*napi
;
970 q_vector
= &adapter
->q_vectors
[q_idx
];
971 napi
= &q_vector
->napi
;
977 * i40evf_napi_disable_all - disable NAPI on all queue vectors
978 * @adapter: board private structure
980 static void i40evf_napi_disable_all(struct i40evf_adapter
*adapter
)
983 struct i40e_q_vector
*q_vector
;
984 int q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
986 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
987 q_vector
= &adapter
->q_vectors
[q_idx
];
988 napi_disable(&q_vector
->napi
);
993 * i40evf_configure - set up transmit and receive data structures
994 * @adapter: board private structure
996 static void i40evf_configure(struct i40evf_adapter
*adapter
)
998 struct net_device
*netdev
= adapter
->netdev
;
1001 i40evf_set_rx_mode(netdev
);
1003 i40evf_configure_tx(adapter
);
1004 i40evf_configure_rx(adapter
);
1005 adapter
->aq_required
|= I40EVF_FLAG_AQ_CONFIGURE_QUEUES
;
1007 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
1008 struct i40e_ring
*ring
= &adapter
->rx_rings
[i
];
1010 if (adapter
->flags
& I40EVF_FLAG_RX_PS_ENABLED
) {
1011 i40evf_alloc_rx_headers(ring
);
1012 i40evf_alloc_rx_buffers_ps(ring
, ring
->count
);
1014 i40evf_alloc_rx_buffers_1buf(ring
, ring
->count
);
1016 ring
->next_to_use
= ring
->count
- 1;
1017 writel(ring
->next_to_use
, ring
->tail
);
1022 * i40evf_up_complete - Finish the last steps of bringing up a connection
1023 * @adapter: board private structure
1025 static int i40evf_up_complete(struct i40evf_adapter
*adapter
)
1027 adapter
->state
= __I40EVF_RUNNING
;
1028 clear_bit(__I40E_DOWN
, &adapter
->vsi
.state
);
1030 i40evf_napi_enable_all(adapter
);
1032 adapter
->aq_required
|= I40EVF_FLAG_AQ_ENABLE_QUEUES
;
1033 mod_timer_pending(&adapter
->watchdog_timer
, jiffies
+ 1);
1038 * i40e_down - Shutdown the connection processing
1039 * @adapter: board private structure
1041 void i40evf_down(struct i40evf_adapter
*adapter
)
1043 struct net_device
*netdev
= adapter
->netdev
;
1044 struct i40evf_mac_filter
*f
;
1046 if (adapter
->state
<= __I40EVF_DOWN_PENDING
)
1049 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
1050 &adapter
->crit_section
))
1051 usleep_range(500, 1000);
1053 netif_carrier_off(netdev
);
1054 netif_tx_disable(netdev
);
1055 i40evf_napi_disable_all(adapter
);
1056 i40evf_irq_disable(adapter
);
1058 /* remove all MAC filters */
1059 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
1062 /* remove all VLAN filters */
1063 list_for_each_entry(f
, &adapter
->vlan_filter_list
, list
) {
1066 if (!(adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) &&
1067 adapter
->state
!= __I40EVF_RESETTING
) {
1068 /* cancel any current operation */
1069 adapter
->current_op
= I40E_VIRTCHNL_OP_UNKNOWN
;
1070 /* Schedule operations to close down the HW. Don't wait
1071 * here for this to complete. The watchdog is still running
1072 * and it will take care of this.
1074 adapter
->aq_required
= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
1075 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_VLAN_FILTER
;
1076 adapter
->aq_required
|= I40EVF_FLAG_AQ_DISABLE_QUEUES
;
1079 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1083 * i40evf_acquire_msix_vectors - Setup the MSIX capability
1084 * @adapter: board private structure
1085 * @vectors: number of vectors to request
1087 * Work with the OS to set up the MSIX vectors needed.
1089 * Returns 0 on success, negative on failure
1092 i40evf_acquire_msix_vectors(struct i40evf_adapter
*adapter
, int vectors
)
1094 int err
, vector_threshold
;
1096 /* We'll want at least 3 (vector_threshold):
1097 * 0) Other (Admin Queue and link, mostly)
1101 vector_threshold
= MIN_MSIX_COUNT
;
1103 /* The more we get, the more we will assign to Tx/Rx Cleanup
1104 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1105 * Right now, we simply care about how many we'll get; we'll
1106 * set them up later while requesting irq's.
1108 err
= pci_enable_msix_range(adapter
->pdev
, adapter
->msix_entries
,
1109 vector_threshold
, vectors
);
1111 dev_err(&adapter
->pdev
->dev
, "Unable to allocate MSI-X interrupts\n");
1112 kfree(adapter
->msix_entries
);
1113 adapter
->msix_entries
= NULL
;
1117 /* Adjust for only the vectors we'll use, which is minimum
1118 * of max_msix_q_vectors + NONQ_VECS, or the number of
1119 * vectors we were allocated.
1121 adapter
->num_msix_vectors
= err
;
1126 * i40evf_free_queues - Free memory for all rings
1127 * @adapter: board private structure to initialize
1129 * Free all of the memory associated with queue pairs.
1131 static void i40evf_free_queues(struct i40evf_adapter
*adapter
)
1133 if (!adapter
->vsi_res
)
1135 kfree(adapter
->tx_rings
);
1136 adapter
->tx_rings
= NULL
;
1137 kfree(adapter
->rx_rings
);
1138 adapter
->rx_rings
= NULL
;
1142 * i40evf_alloc_queues - Allocate memory for all rings
1143 * @adapter: board private structure to initialize
1145 * We allocate one ring per queue at run-time since we don't know the
1146 * number of queues at compile-time. The polling_netdev array is
1147 * intended for Multiqueue, but should work fine with a single queue.
1149 static int i40evf_alloc_queues(struct i40evf_adapter
*adapter
)
1153 adapter
->tx_rings
= kcalloc(adapter
->num_active_queues
,
1154 sizeof(struct i40e_ring
), GFP_KERNEL
);
1155 if (!adapter
->tx_rings
)
1157 adapter
->rx_rings
= kcalloc(adapter
->num_active_queues
,
1158 sizeof(struct i40e_ring
), GFP_KERNEL
);
1159 if (!adapter
->rx_rings
)
1162 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
1163 struct i40e_ring
*tx_ring
;
1164 struct i40e_ring
*rx_ring
;
1166 tx_ring
= &adapter
->tx_rings
[i
];
1168 tx_ring
->queue_index
= i
;
1169 tx_ring
->netdev
= adapter
->netdev
;
1170 tx_ring
->dev
= &adapter
->pdev
->dev
;
1171 tx_ring
->count
= adapter
->tx_desc_count
;
1172 if (adapter
->flags
& I40E_FLAG_WB_ON_ITR_CAPABLE
)
1173 tx_ring
->flags
|= I40E_TXR_FLAGS_WB_ON_ITR
;
1175 rx_ring
= &adapter
->rx_rings
[i
];
1176 rx_ring
->queue_index
= i
;
1177 rx_ring
->netdev
= adapter
->netdev
;
1178 rx_ring
->dev
= &adapter
->pdev
->dev
;
1179 rx_ring
->count
= adapter
->rx_desc_count
;
1185 i40evf_free_queues(adapter
);
1190 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
1191 * @adapter: board private structure to initialize
1193 * Attempt to configure the interrupts using the best available
1194 * capabilities of the hardware and the kernel.
1196 static int i40evf_set_interrupt_capability(struct i40evf_adapter
*adapter
)
1198 int vector
, v_budget
;
1202 if (!adapter
->vsi_res
) {
1206 pairs
= adapter
->num_active_queues
;
1208 /* It's easy to be greedy for MSI-X vectors, but it really
1209 * doesn't do us much good if we have a lot more vectors
1210 * than CPU's. So let's be conservative and only ask for
1211 * (roughly) twice the number of vectors as there are CPU's.
1213 v_budget
= min_t(int, pairs
, (int)(num_online_cpus() * 2)) + NONQ_VECS
;
1214 v_budget
= min_t(int, v_budget
, (int)adapter
->vf_res
->max_vectors
);
1216 adapter
->msix_entries
= kcalloc(v_budget
,
1217 sizeof(struct msix_entry
), GFP_KERNEL
);
1218 if (!adapter
->msix_entries
) {
1223 for (vector
= 0; vector
< v_budget
; vector
++)
1224 adapter
->msix_entries
[vector
].entry
= vector
;
1226 err
= i40evf_acquire_msix_vectors(adapter
, v_budget
);
1229 netif_set_real_num_rx_queues(adapter
->netdev
, pairs
);
1230 netif_set_real_num_tx_queues(adapter
->netdev
, pairs
);
1235 * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
1236 * @adapter: board private structure
1238 * Return 0 on success, negative on failure
1240 static int i40evf_config_rss_aq(struct i40evf_adapter
*adapter
)
1242 struct i40e_aqc_get_set_rss_key_data
*rss_key
=
1243 (struct i40e_aqc_get_set_rss_key_data
*)adapter
->rss_key
;
1244 struct i40e_hw
*hw
= &adapter
->hw
;
1247 if (adapter
->current_op
!= I40E_VIRTCHNL_OP_UNKNOWN
) {
1248 /* bail because we already have a command pending */
1249 dev_err(&adapter
->pdev
->dev
, "Cannot configure RSS, command %d pending\n",
1250 adapter
->current_op
);
1254 ret
= i40evf_aq_set_rss_key(hw
, adapter
->vsi
.id
, rss_key
);
1256 dev_err(&adapter
->pdev
->dev
, "Cannot set RSS key, err %s aq_err %s\n",
1257 i40evf_stat_str(hw
, ret
),
1258 i40evf_aq_str(hw
, hw
->aq
.asq_last_status
));
1263 ret
= i40evf_aq_set_rss_lut(hw
, adapter
->vsi
.id
, false,
1264 adapter
->rss_lut
, adapter
->rss_lut_size
);
1266 dev_err(&adapter
->pdev
->dev
, "Cannot set RSS lut, err %s aq_err %s\n",
1267 i40evf_stat_str(hw
, ret
),
1268 i40evf_aq_str(hw
, hw
->aq
.asq_last_status
));
1276 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
1277 * @adapter: board private structure
1279 * Returns 0 on success, negative on failure
1281 static int i40evf_config_rss_reg(struct i40evf_adapter
*adapter
)
1283 struct i40e_hw
*hw
= &adapter
->hw
;
1287 dw
= (u32
*)adapter
->rss_key
;
1288 for (i
= 0; i
<= adapter
->rss_key_size
/ 4; i
++)
1289 wr32(hw
, I40E_VFQF_HKEY(i
), dw
[i
]);
1291 dw
= (u32
*)adapter
->rss_lut
;
1292 for (i
= 0; i
<= adapter
->rss_lut_size
/ 4; i
++)
1293 wr32(hw
, I40E_VFQF_HLUT(i
), dw
[i
]);
1301 * i40evf_config_rss - Configure RSS keys and lut
1302 * @adapter: board private structure
1304 * Returns 0 on success, negative on failure
1306 int i40evf_config_rss(struct i40evf_adapter
*adapter
)
1309 if (RSS_PF(adapter
)) {
1310 adapter
->aq_required
|= I40EVF_FLAG_AQ_SET_RSS_LUT
|
1311 I40EVF_FLAG_AQ_SET_RSS_KEY
;
1313 } else if (RSS_AQ(adapter
)) {
1314 return i40evf_config_rss_aq(adapter
);
1316 return i40evf_config_rss_reg(adapter
);
1321 * i40evf_fill_rss_lut - Fill the lut with default values
1322 * @adapter: board private structure
1324 static void i40evf_fill_rss_lut(struct i40evf_adapter
*adapter
)
1328 for (i
= 0; i
< adapter
->rss_lut_size
; i
++)
1329 adapter
->rss_lut
[i
] = i
% adapter
->num_active_queues
;
1333 * i40evf_init_rss - Prepare for RSS
1334 * @adapter: board private structure
1336 * Return 0 on success, negative on failure
1338 static int i40evf_init_rss(struct i40evf_adapter
*adapter
)
1340 struct i40e_hw
*hw
= &adapter
->hw
;
1343 if (!RSS_PF(adapter
)) {
1344 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1345 if (adapter
->vf_res
->vf_offload_flags
&
1346 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
)
1347 adapter
->hena
= I40E_DEFAULT_RSS_HENA_EXPANDED
;
1349 adapter
->hena
= I40E_DEFAULT_RSS_HENA
;
1351 wr32(hw
, I40E_VFQF_HENA(0), (u32
)adapter
->hena
);
1352 wr32(hw
, I40E_VFQF_HENA(1), (u32
)(adapter
->hena
>> 32));
1355 i40evf_fill_rss_lut(adapter
);
1357 netdev_rss_key_fill((void *)adapter
->rss_key
, adapter
->rss_key_size
);
1358 ret
= i40evf_config_rss(adapter
);
1364 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1365 * @adapter: board private structure to initialize
1367 * We allocate one q_vector per queue interrupt. If allocation fails we
1370 static int i40evf_alloc_q_vectors(struct i40evf_adapter
*adapter
)
1372 int q_idx
= 0, num_q_vectors
;
1373 struct i40e_q_vector
*q_vector
;
1375 num_q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1376 adapter
->q_vectors
= kcalloc(num_q_vectors
, sizeof(*q_vector
),
1378 if (!adapter
->q_vectors
)
1381 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1382 q_vector
= &adapter
->q_vectors
[q_idx
];
1383 q_vector
->adapter
= adapter
;
1384 q_vector
->vsi
= &adapter
->vsi
;
1385 q_vector
->v_idx
= q_idx
;
1386 netif_napi_add(adapter
->netdev
, &q_vector
->napi
,
1387 i40evf_napi_poll
, NAPI_POLL_WEIGHT
);
1394 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1395 * @adapter: board private structure to initialize
1397 * This function frees the memory allocated to the q_vectors. In addition if
1398 * NAPI is enabled it will delete any references to the NAPI struct prior
1399 * to freeing the q_vector.
1401 static void i40evf_free_q_vectors(struct i40evf_adapter
*adapter
)
1403 int q_idx
, num_q_vectors
;
1406 num_q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1407 napi_vectors
= adapter
->num_active_queues
;
1409 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1410 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[q_idx
];
1411 if (q_idx
< napi_vectors
)
1412 netif_napi_del(&q_vector
->napi
);
1414 kfree(adapter
->q_vectors
);
1418 * i40evf_reset_interrupt_capability - Reset MSIX setup
1419 * @adapter: board private structure
1422 void i40evf_reset_interrupt_capability(struct i40evf_adapter
*adapter
)
1424 pci_disable_msix(adapter
->pdev
);
1425 kfree(adapter
->msix_entries
);
1426 adapter
->msix_entries
= NULL
;
1430 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1431 * @adapter: board private structure to initialize
1434 int i40evf_init_interrupt_scheme(struct i40evf_adapter
*adapter
)
1438 err
= i40evf_set_interrupt_capability(adapter
);
1440 dev_err(&adapter
->pdev
->dev
,
1441 "Unable to setup interrupt capabilities\n");
1442 goto err_set_interrupt
;
1445 err
= i40evf_alloc_q_vectors(adapter
);
1447 dev_err(&adapter
->pdev
->dev
,
1448 "Unable to allocate memory for queue vectors\n");
1449 goto err_alloc_q_vectors
;
1452 err
= i40evf_alloc_queues(adapter
);
1454 dev_err(&adapter
->pdev
->dev
,
1455 "Unable to allocate memory for queues\n");
1456 goto err_alloc_queues
;
1459 dev_info(&adapter
->pdev
->dev
, "Multiqueue %s: Queue pair count = %u",
1460 (adapter
->num_active_queues
> 1) ? "Enabled" : "Disabled",
1461 adapter
->num_active_queues
);
1465 i40evf_free_q_vectors(adapter
);
1466 err_alloc_q_vectors
:
1467 i40evf_reset_interrupt_capability(adapter
);
1473 * i40evf_free_rss - Free memory used by RSS structs
1474 * @adapter: board private structure
1476 static void i40evf_free_rss(struct i40evf_adapter
*adapter
)
1478 kfree(adapter
->rss_key
);
1479 adapter
->rss_key
= NULL
;
1481 kfree(adapter
->rss_lut
);
1482 adapter
->rss_lut
= NULL
;
1486 * i40evf_watchdog_timer - Periodic call-back timer
1487 * @data: pointer to adapter disguised as unsigned long
1489 static void i40evf_watchdog_timer(unsigned long data
)
1491 struct i40evf_adapter
*adapter
= (struct i40evf_adapter
*)data
;
1493 schedule_work(&adapter
->watchdog_task
);
1494 /* timer will be rescheduled in watchdog task */
1498 * i40evf_watchdog_task - Periodic call-back task
1499 * @work: pointer to work_struct
1501 static void i40evf_watchdog_task(struct work_struct
*work
)
1503 struct i40evf_adapter
*adapter
= container_of(work
,
1504 struct i40evf_adapter
,
1506 struct i40e_hw
*hw
= &adapter
->hw
;
1509 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
))
1510 goto restart_watchdog
;
1512 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) {
1513 reg_val
= rd32(hw
, I40E_VFGEN_RSTAT
) &
1514 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
1515 if ((reg_val
== I40E_VFR_VFACTIVE
) ||
1516 (reg_val
== I40E_VFR_COMPLETED
)) {
1517 /* A chance for redemption! */
1518 dev_err(&adapter
->pdev
->dev
, "Hardware came out of reset. Attempting reinit.\n");
1519 adapter
->state
= __I40EVF_STARTUP
;
1520 adapter
->flags
&= ~I40EVF_FLAG_PF_COMMS_FAILED
;
1521 schedule_delayed_work(&adapter
->init_task
, 10);
1522 clear_bit(__I40EVF_IN_CRITICAL_TASK
,
1523 &adapter
->crit_section
);
1524 /* Don't reschedule the watchdog, since we've restarted
1525 * the init task. When init_task contacts the PF and
1526 * gets everything set up again, it'll restart the
1527 * watchdog for us. Down, boy. Sit. Stay. Woof.
1531 adapter
->aq_required
= 0;
1532 adapter
->current_op
= I40E_VIRTCHNL_OP_UNKNOWN
;
1536 if ((adapter
->state
< __I40EVF_DOWN
) ||
1537 (adapter
->flags
& I40EVF_FLAG_RESET_PENDING
))
1540 /* check for reset */
1541 reg_val
= rd32(hw
, I40E_VF_ARQLEN1
) & I40E_VF_ARQLEN1_ARQENABLE_MASK
;
1542 if (!(adapter
->flags
& I40EVF_FLAG_RESET_PENDING
) && !reg_val
) {
1543 adapter
->state
= __I40EVF_RESETTING
;
1544 adapter
->flags
|= I40EVF_FLAG_RESET_PENDING
;
1545 dev_err(&adapter
->pdev
->dev
, "Hardware reset detected\n");
1546 schedule_work(&adapter
->reset_task
);
1547 adapter
->aq_required
= 0;
1548 adapter
->current_op
= I40E_VIRTCHNL_OP_UNKNOWN
;
1552 /* Process admin queue tasks. After init, everything gets done
1553 * here so we don't race on the admin queue.
1555 if (adapter
->current_op
) {
1556 if (!i40evf_asq_done(hw
)) {
1557 dev_dbg(&adapter
->pdev
->dev
, "Admin queue timeout\n");
1558 i40evf_send_api_ver(adapter
);
1562 if (adapter
->aq_required
& I40EVF_FLAG_AQ_GET_CONFIG
) {
1563 i40evf_send_vf_config_msg(adapter
);
1567 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DISABLE_QUEUES
) {
1568 i40evf_disable_queues(adapter
);
1572 if (adapter
->aq_required
& I40EVF_FLAG_AQ_MAP_VECTORS
) {
1573 i40evf_map_queues(adapter
);
1577 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ADD_MAC_FILTER
) {
1578 i40evf_add_ether_addrs(adapter
);
1582 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ADD_VLAN_FILTER
) {
1583 i40evf_add_vlans(adapter
);
1587 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DEL_MAC_FILTER
) {
1588 i40evf_del_ether_addrs(adapter
);
1592 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DEL_VLAN_FILTER
) {
1593 i40evf_del_vlans(adapter
);
1597 if (adapter
->aq_required
& I40EVF_FLAG_AQ_CONFIGURE_QUEUES
) {
1598 i40evf_configure_queues(adapter
);
1602 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ENABLE_QUEUES
) {
1603 i40evf_enable_queues(adapter
);
1607 if (adapter
->aq_required
& I40EVF_FLAG_AQ_CONFIGURE_RSS
) {
1608 /* This message goes straight to the firmware, not the
1609 * PF, so we don't have to set current_op as we will
1610 * not get a response through the ARQ.
1612 i40evf_init_rss(adapter
);
1613 adapter
->aq_required
&= ~I40EVF_FLAG_AQ_CONFIGURE_RSS
;
1616 if (adapter
->aq_required
& I40EVF_FLAG_AQ_GET_HENA
) {
1617 i40evf_get_hena(adapter
);
1620 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_HENA
) {
1621 i40evf_set_hena(adapter
);
1624 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_RSS_KEY
) {
1625 i40evf_set_rss_key(adapter
);
1628 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_RSS_LUT
) {
1629 i40evf_set_rss_lut(adapter
);
1633 if (adapter
->aq_required
& I40EVF_FLAG_AQ_REQUEST_PROMISC
) {
1634 i40evf_set_promiscuous(adapter
, I40E_FLAG_VF_UNICAST_PROMISC
|
1635 I40E_FLAG_VF_MULTICAST_PROMISC
);
1639 if (adapter
->aq_required
& I40EVF_FLAG_AQ_RELEASE_PROMISC
) {
1640 i40evf_set_promiscuous(adapter
, 0);
1644 if (adapter
->state
== __I40EVF_RUNNING
)
1645 i40evf_request_stats(adapter
);
1647 if (adapter
->state
== __I40EVF_RUNNING
) {
1648 i40evf_irq_enable_queues(adapter
, ~0);
1649 i40evf_fire_sw_int(adapter
, 0xFF);
1651 i40evf_fire_sw_int(adapter
, 0x1);
1654 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1656 if (adapter
->state
== __I40EVF_REMOVE
)
1658 if (adapter
->aq_required
)
1659 mod_timer(&adapter
->watchdog_timer
,
1660 jiffies
+ msecs_to_jiffies(20));
1662 mod_timer(&adapter
->watchdog_timer
, jiffies
+ (HZ
* 2));
1663 schedule_work(&adapter
->adminq_task
);
1666 #define I40EVF_RESET_WAIT_MS 10
1667 #define I40EVF_RESET_WAIT_COUNT 500
1669 * i40evf_reset_task - Call-back task to handle hardware reset
1670 * @work: pointer to work_struct
1672 * During reset we need to shut down and reinitialize the admin queue
1673 * before we can use it to communicate with the PF again. We also clear
1674 * and reinit the rings because that context is lost as well.
1676 static void i40evf_reset_task(struct work_struct
*work
)
1678 struct i40evf_adapter
*adapter
= container_of(work
,
1679 struct i40evf_adapter
,
1681 struct net_device
*netdev
= adapter
->netdev
;
1682 struct i40e_hw
*hw
= &adapter
->hw
;
1683 struct i40evf_vlan_filter
*vlf
;
1684 struct i40evf_mac_filter
*f
;
1688 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
1689 &adapter
->crit_section
))
1690 usleep_range(500, 1000);
1692 i40evf_misc_irq_disable(adapter
);
1693 if (adapter
->flags
& I40EVF_FLAG_RESET_NEEDED
) {
1694 adapter
->flags
&= ~I40EVF_FLAG_RESET_NEEDED
;
1695 /* Restart the AQ here. If we have been reset but didn't
1696 * detect it, or if the PF had to reinit, our AQ will be hosed.
1698 i40evf_shutdown_adminq(hw
);
1699 i40evf_init_adminq(hw
);
1700 i40evf_request_reset(adapter
);
1702 adapter
->flags
|= I40EVF_FLAG_RESET_PENDING
;
1704 /* poll until we see the reset actually happen */
1705 for (i
= 0; i
< I40EVF_RESET_WAIT_COUNT
; i
++) {
1706 reg_val
= rd32(hw
, I40E_VF_ARQLEN1
) &
1707 I40E_VF_ARQLEN1_ARQENABLE_MASK
;
1710 usleep_range(5000, 10000);
1712 if (i
== I40EVF_RESET_WAIT_COUNT
) {
1713 dev_info(&adapter
->pdev
->dev
, "Never saw reset\n");
1714 goto continue_reset
; /* act like the reset happened */
1717 /* wait until the reset is complete and the PF is responding to us */
1718 for (i
= 0; i
< I40EVF_RESET_WAIT_COUNT
; i
++) {
1719 reg_val
= rd32(hw
, I40E_VFGEN_RSTAT
) &
1720 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
1721 if (reg_val
== I40E_VFR_VFACTIVE
)
1723 msleep(I40EVF_RESET_WAIT_MS
);
1725 pci_set_master(adapter
->pdev
);
1726 /* extra wait to make sure minimum wait is met */
1727 msleep(I40EVF_RESET_WAIT_MS
);
1728 if (i
== I40EVF_RESET_WAIT_COUNT
) {
1729 struct i40evf_mac_filter
*ftmp
;
1730 struct i40evf_vlan_filter
*fv
, *fvtmp
;
1732 /* reset never finished */
1733 dev_err(&adapter
->pdev
->dev
, "Reset never finished (%x)\n",
1735 adapter
->flags
|= I40EVF_FLAG_PF_COMMS_FAILED
;
1737 if (netif_running(adapter
->netdev
)) {
1738 set_bit(__I40E_DOWN
, &adapter
->vsi
.state
);
1739 netif_carrier_off(netdev
);
1740 netif_tx_disable(netdev
);
1741 i40evf_napi_disable_all(adapter
);
1742 i40evf_irq_disable(adapter
);
1743 i40evf_free_traffic_irqs(adapter
);
1744 i40evf_free_all_tx_resources(adapter
);
1745 i40evf_free_all_rx_resources(adapter
);
1748 /* Delete all of the filters, both MAC and VLAN. */
1749 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
,
1755 list_for_each_entry_safe(fv
, fvtmp
, &adapter
->vlan_filter_list
,
1757 list_del(&fv
->list
);
1761 i40evf_free_misc_irq(adapter
);
1762 i40evf_reset_interrupt_capability(adapter
);
1763 i40evf_free_queues(adapter
);
1764 i40evf_free_q_vectors(adapter
);
1765 kfree(adapter
->vf_res
);
1766 i40evf_shutdown_adminq(hw
);
1767 adapter
->netdev
->flags
&= ~IFF_UP
;
1768 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1769 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
1770 adapter
->state
= __I40EVF_DOWN
;
1771 dev_info(&adapter
->pdev
->dev
, "Reset task did not complete, VF disabled\n");
1772 return; /* Do not attempt to reinit. It's dead, Jim. */
1776 if (netif_running(adapter
->netdev
)) {
1777 netif_carrier_off(netdev
);
1778 netif_tx_stop_all_queues(netdev
);
1779 i40evf_napi_disable_all(adapter
);
1781 i40evf_irq_disable(adapter
);
1783 adapter
->state
= __I40EVF_RESETTING
;
1784 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
1786 /* free the Tx/Rx rings and descriptors, might be better to just
1787 * re-use them sometime in the future
1789 i40evf_free_all_rx_resources(adapter
);
1790 i40evf_free_all_tx_resources(adapter
);
1792 /* kill and reinit the admin queue */
1793 if (i40evf_shutdown_adminq(hw
))
1794 dev_warn(&adapter
->pdev
->dev
, "Failed to shut down adminq\n");
1795 adapter
->current_op
= I40E_VIRTCHNL_OP_UNKNOWN
;
1796 err
= i40evf_init_adminq(hw
);
1798 dev_info(&adapter
->pdev
->dev
, "Failed to init adminq: %d\n",
1801 adapter
->aq_required
= I40EVF_FLAG_AQ_GET_CONFIG
;
1802 adapter
->aq_required
|= I40EVF_FLAG_AQ_MAP_VECTORS
;
1804 /* re-add all MAC filters */
1805 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
1808 /* re-add all VLAN filters */
1809 list_for_each_entry(vlf
, &adapter
->vlan_filter_list
, list
) {
1812 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_MAC_FILTER
;
1813 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_VLAN_FILTER
;
1814 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1815 i40evf_misc_irq_enable(adapter
);
1817 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 2);
1819 if (netif_running(adapter
->netdev
)) {
1820 /* allocate transmit descriptors */
1821 err
= i40evf_setup_all_tx_resources(adapter
);
1825 /* allocate receive descriptors */
1826 err
= i40evf_setup_all_rx_resources(adapter
);
1830 i40evf_configure(adapter
);
1832 err
= i40evf_up_complete(adapter
);
1836 i40evf_irq_enable(adapter
, true);
1838 adapter
->state
= __I40EVF_DOWN
;
1843 dev_err(&adapter
->pdev
->dev
, "failed to allocate resources during reinit\n");
1844 i40evf_close(adapter
->netdev
);
1848 * i40evf_adminq_task - worker thread to clean the admin queue
1849 * @work: pointer to work_struct containing our data
1851 static void i40evf_adminq_task(struct work_struct
*work
)
1853 struct i40evf_adapter
*adapter
=
1854 container_of(work
, struct i40evf_adapter
, adminq_task
);
1855 struct i40e_hw
*hw
= &adapter
->hw
;
1856 struct i40e_arq_event_info event
;
1857 struct i40e_virtchnl_msg
*v_msg
;
1862 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
)
1865 event
.buf_len
= I40EVF_MAX_AQ_BUF_SIZE
;
1866 event
.msg_buf
= kzalloc(event
.buf_len
, GFP_KERNEL
);
1870 v_msg
= (struct i40e_virtchnl_msg
*)&event
.desc
;
1872 ret
= i40evf_clean_arq_element(hw
, &event
, &pending
);
1873 if (ret
|| !v_msg
->v_opcode
)
1874 break; /* No event to process or error cleaning ARQ */
1876 i40evf_virtchnl_completion(adapter
, v_msg
->v_opcode
,
1877 v_msg
->v_retval
, event
.msg_buf
,
1880 memset(event
.msg_buf
, 0, I40EVF_MAX_AQ_BUF_SIZE
);
1883 if ((adapter
->flags
&
1884 (I40EVF_FLAG_RESET_PENDING
| I40EVF_FLAG_RESET_NEEDED
)) ||
1885 adapter
->state
== __I40EVF_RESETTING
)
1888 /* check for error indications */
1889 val
= rd32(hw
, hw
->aq
.arq
.len
);
1890 if (val
== 0xdeadbeef) /* indicates device in reset */
1893 if (val
& I40E_VF_ARQLEN1_ARQVFE_MASK
) {
1894 dev_info(&adapter
->pdev
->dev
, "ARQ VF Error detected\n");
1895 val
&= ~I40E_VF_ARQLEN1_ARQVFE_MASK
;
1897 if (val
& I40E_VF_ARQLEN1_ARQOVFL_MASK
) {
1898 dev_info(&adapter
->pdev
->dev
, "ARQ Overflow Error detected\n");
1899 val
&= ~I40E_VF_ARQLEN1_ARQOVFL_MASK
;
1901 if (val
& I40E_VF_ARQLEN1_ARQCRIT_MASK
) {
1902 dev_info(&adapter
->pdev
->dev
, "ARQ Critical Error detected\n");
1903 val
&= ~I40E_VF_ARQLEN1_ARQCRIT_MASK
;
1906 wr32(hw
, hw
->aq
.arq
.len
, val
);
1908 val
= rd32(hw
, hw
->aq
.asq
.len
);
1910 if (val
& I40E_VF_ATQLEN1_ATQVFE_MASK
) {
1911 dev_info(&adapter
->pdev
->dev
, "ASQ VF Error detected\n");
1912 val
&= ~I40E_VF_ATQLEN1_ATQVFE_MASK
;
1914 if (val
& I40E_VF_ATQLEN1_ATQOVFL_MASK
) {
1915 dev_info(&adapter
->pdev
->dev
, "ASQ Overflow Error detected\n");
1916 val
&= ~I40E_VF_ATQLEN1_ATQOVFL_MASK
;
1918 if (val
& I40E_VF_ATQLEN1_ATQCRIT_MASK
) {
1919 dev_info(&adapter
->pdev
->dev
, "ASQ Critical Error detected\n");
1920 val
&= ~I40E_VF_ATQLEN1_ATQCRIT_MASK
;
1923 wr32(hw
, hw
->aq
.asq
.len
, val
);
1926 kfree(event
.msg_buf
);
1928 /* re-enable Admin queue interrupt cause */
1929 i40evf_misc_irq_enable(adapter
);
1933 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
1934 * @adapter: board private structure
1936 * Free all transmit software resources
1938 void i40evf_free_all_tx_resources(struct i40evf_adapter
*adapter
)
1942 if (!adapter
->tx_rings
)
1945 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
1946 if (adapter
->tx_rings
[i
].desc
)
1947 i40evf_free_tx_resources(&adapter
->tx_rings
[i
]);
1951 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
1952 * @adapter: board private structure
1954 * If this function returns with an error, then it's possible one or
1955 * more of the rings is populated (while the rest are not). It is the
1956 * callers duty to clean those orphaned rings.
1958 * Return 0 on success, negative on failure
1960 static int i40evf_setup_all_tx_resources(struct i40evf_adapter
*adapter
)
1964 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
1965 adapter
->tx_rings
[i
].count
= adapter
->tx_desc_count
;
1966 err
= i40evf_setup_tx_descriptors(&adapter
->tx_rings
[i
]);
1969 dev_err(&adapter
->pdev
->dev
,
1970 "Allocation for Tx Queue %u failed\n", i
);
1978 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
1979 * @adapter: board private structure
1981 * If this function returns with an error, then it's possible one or
1982 * more of the rings is populated (while the rest are not). It is the
1983 * callers duty to clean those orphaned rings.
1985 * Return 0 on success, negative on failure
1987 static int i40evf_setup_all_rx_resources(struct i40evf_adapter
*adapter
)
1991 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
1992 adapter
->rx_rings
[i
].count
= adapter
->rx_desc_count
;
1993 err
= i40evf_setup_rx_descriptors(&adapter
->rx_rings
[i
]);
1996 dev_err(&adapter
->pdev
->dev
,
1997 "Allocation for Rx Queue %u failed\n", i
);
2004 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2005 * @adapter: board private structure
2007 * Free all receive software resources
2009 void i40evf_free_all_rx_resources(struct i40evf_adapter
*adapter
)
2013 if (!adapter
->rx_rings
)
2016 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
2017 if (adapter
->rx_rings
[i
].desc
)
2018 i40evf_free_rx_resources(&adapter
->rx_rings
[i
]);
2022 * i40evf_open - Called when a network interface is made active
2023 * @netdev: network interface device structure
2025 * Returns 0 on success, negative value on failure
2027 * The open entry point is called when a network interface is made
2028 * active by the system (IFF_UP). At this point all resources needed
2029 * for transmit and receive operations are allocated, the interrupt
2030 * handler is registered with the OS, the watchdog timer is started,
2031 * and the stack is notified that the interface is ready.
2033 static int i40evf_open(struct net_device
*netdev
)
2035 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2038 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) {
2039 dev_err(&adapter
->pdev
->dev
, "Unable to open device due to PF driver failure.\n");
2043 if (adapter
->state
!= __I40EVF_DOWN
)
2046 /* allocate transmit descriptors */
2047 err
= i40evf_setup_all_tx_resources(adapter
);
2051 /* allocate receive descriptors */
2052 err
= i40evf_setup_all_rx_resources(adapter
);
2056 /* clear any pending interrupts, may auto mask */
2057 err
= i40evf_request_traffic_irqs(adapter
, netdev
->name
);
2061 i40evf_add_filter(adapter
, adapter
->hw
.mac
.addr
);
2062 i40evf_configure(adapter
);
2064 err
= i40evf_up_complete(adapter
);
2068 i40evf_irq_enable(adapter
, true);
2073 i40evf_down(adapter
);
2074 i40evf_free_traffic_irqs(adapter
);
2076 i40evf_free_all_rx_resources(adapter
);
2078 i40evf_free_all_tx_resources(adapter
);
2084 * i40evf_close - Disables a network interface
2085 * @netdev: network interface device structure
2087 * Returns 0, this is not allowed to fail
2089 * The close entry point is called when an interface is de-activated
2090 * by the OS. The hardware is still under the drivers control, but
2091 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2092 * are freed, along with all transmit and receive resources.
2094 static int i40evf_close(struct net_device
*netdev
)
2096 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2098 if (adapter
->state
<= __I40EVF_DOWN_PENDING
)
2102 set_bit(__I40E_DOWN
, &adapter
->vsi
.state
);
2104 i40evf_down(adapter
);
2105 adapter
->state
= __I40EVF_DOWN_PENDING
;
2106 i40evf_free_traffic_irqs(adapter
);
2112 * i40evf_get_stats - Get System Network Statistics
2113 * @netdev: network interface device structure
2115 * Returns the address of the device statistics structure.
2116 * The statistics are actually updated from the timer callback.
2118 static struct net_device_stats
*i40evf_get_stats(struct net_device
*netdev
)
2120 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2122 /* only return the current stats */
2123 return &adapter
->net_stats
;
2127 * i40evf_change_mtu - Change the Maximum Transfer Unit
2128 * @netdev: network interface device structure
2129 * @new_mtu: new value for maximum frame size
2131 * Returns 0 on success, negative on failure
2133 static int i40evf_change_mtu(struct net_device
*netdev
, int new_mtu
)
2135 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2136 int max_frame
= new_mtu
+ ETH_HLEN
+ ETH_FCS_LEN
;
2138 if ((new_mtu
< 68) || (max_frame
> I40E_MAX_RXBUFFER
))
2141 netdev
->mtu
= new_mtu
;
2142 adapter
->flags
|= I40EVF_FLAG_RESET_NEEDED
;
2143 schedule_work(&adapter
->reset_task
);
2148 #define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
2149 NETIF_F_HW_VLAN_CTAG_RX |\
2150 NETIF_F_HW_VLAN_CTAG_FILTER)
2153 * i40evf_fix_features - fix up the netdev feature bits
2154 * @netdev: our net device
2155 * @features: desired feature bits
2157 * Returns fixed-up features bits
2159 static netdev_features_t
i40evf_fix_features(struct net_device
*netdev
,
2160 netdev_features_t features
)
2162 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2164 features
&= ~I40EVF_VLAN_FEATURES
;
2165 if (adapter
->vf_res
->vf_offload_flags
& I40E_VIRTCHNL_VF_OFFLOAD_VLAN
)
2166 features
|= I40EVF_VLAN_FEATURES
;
2170 static const struct net_device_ops i40evf_netdev_ops
= {
2171 .ndo_open
= i40evf_open
,
2172 .ndo_stop
= i40evf_close
,
2173 .ndo_start_xmit
= i40evf_xmit_frame
,
2174 .ndo_get_stats
= i40evf_get_stats
,
2175 .ndo_set_rx_mode
= i40evf_set_rx_mode
,
2176 .ndo_validate_addr
= eth_validate_addr
,
2177 .ndo_set_mac_address
= i40evf_set_mac
,
2178 .ndo_change_mtu
= i40evf_change_mtu
,
2179 .ndo_tx_timeout
= i40evf_tx_timeout
,
2180 .ndo_vlan_rx_add_vid
= i40evf_vlan_rx_add_vid
,
2181 .ndo_vlan_rx_kill_vid
= i40evf_vlan_rx_kill_vid
,
2182 .ndo_fix_features
= i40evf_fix_features
,
2183 #ifdef CONFIG_NET_POLL_CONTROLLER
2184 .ndo_poll_controller
= i40evf_netpoll
,
2189 * i40evf_check_reset_complete - check that VF reset is complete
2190 * @hw: pointer to hw struct
2192 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2194 static int i40evf_check_reset_complete(struct i40e_hw
*hw
)
2199 for (i
= 0; i
< 100; i
++) {
2200 rstat
= rd32(hw
, I40E_VFGEN_RSTAT
) &
2201 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
2202 if ((rstat
== I40E_VFR_VFACTIVE
) ||
2203 (rstat
== I40E_VFR_COMPLETED
))
2205 usleep_range(10, 20);
2211 * i40evf_process_config - Process the config information we got from the PF
2212 * @adapter: board private structure
2214 * Verify that we have a valid config struct, and set up our netdev features
2215 * and our VSI struct.
2217 int i40evf_process_config(struct i40evf_adapter
*adapter
)
2219 struct i40e_virtchnl_vf_resource
*vfres
= adapter
->vf_res
;
2220 struct net_device
*netdev
= adapter
->netdev
;
2221 struct i40e_vsi
*vsi
= &adapter
->vsi
;
2224 /* got VF config message back from PF, now we can parse it */
2225 for (i
= 0; i
< vfres
->num_vsis
; i
++) {
2226 if (vfres
->vsi_res
[i
].vsi_type
== I40E_VSI_SRIOV
)
2227 adapter
->vsi_res
= &vfres
->vsi_res
[i
];
2229 if (!adapter
->vsi_res
) {
2230 dev_err(&adapter
->pdev
->dev
, "No LAN VSI found\n");
2234 netdev
->hw_enc_features
|= NETIF_F_SG
|
2238 NETIF_F_SOFT_FEATURES
|
2243 NETIF_F_GSO_GRE_CSUM
|
2246 NETIF_F_GSO_UDP_TUNNEL
|
2247 NETIF_F_GSO_UDP_TUNNEL_CSUM
|
2248 NETIF_F_GSO_PARTIAL
|
2254 if (!(adapter
->flags
& I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
))
2255 netdev
->gso_partial_features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
2257 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
2259 /* record features VLANs can make use of */
2260 netdev
->vlan_features
|= netdev
->hw_enc_features
|
2261 NETIF_F_TSO_MANGLEID
;
2263 /* Write features and hw_features separately to avoid polluting
2264 * with, or dropping, features that are set when we registgered.
2266 netdev
->hw_features
|= netdev
->hw_enc_features
;
2268 netdev
->features
|= netdev
->hw_enc_features
| I40EVF_VLAN_FEATURES
;
2269 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
2271 /* disable VLAN features if not supported */
2272 if (!(vfres
->vf_offload_flags
& I40E_VIRTCHNL_VF_OFFLOAD_VLAN
))
2273 netdev
->features
^= I40EVF_VLAN_FEATURES
;
2275 adapter
->vsi
.id
= adapter
->vsi_res
->vsi_id
;
2277 adapter
->vsi
.back
= adapter
;
2278 adapter
->vsi
.base_vector
= 1;
2279 adapter
->vsi
.work_limit
= I40E_DEFAULT_IRQ_WORK
;
2280 adapter
->vsi
.rx_itr_setting
= (I40E_ITR_DYNAMIC
|
2281 ITR_REG_TO_USEC(I40E_ITR_RX_DEF
));
2282 adapter
->vsi
.tx_itr_setting
= (I40E_ITR_DYNAMIC
|
2283 ITR_REG_TO_USEC(I40E_ITR_TX_DEF
));
2284 vsi
->netdev
= adapter
->netdev
;
2285 vsi
->qs_handle
= adapter
->vsi_res
->qset_handle
;
2286 if (vfres
->vf_offload_flags
& I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF
) {
2287 adapter
->rss_key_size
= vfres
->rss_key_size
;
2288 adapter
->rss_lut_size
= vfres
->rss_lut_size
;
2290 adapter
->rss_key_size
= I40EVF_HKEY_ARRAY_SIZE
;
2291 adapter
->rss_lut_size
= I40EVF_HLUT_ARRAY_SIZE
;
2298 * i40evf_init_task - worker thread to perform delayed initialization
2299 * @work: pointer to work_struct containing our data
2301 * This task completes the work that was begun in probe. Due to the nature
2302 * of VF-PF communications, we may need to wait tens of milliseconds to get
2303 * responses back from the PF. Rather than busy-wait in probe and bog down the
2304 * whole system, we'll do it in a task so we can sleep.
2305 * This task only runs during driver init. Once we've established
2306 * communications with the PF driver and set up our netdev, the watchdog
2309 static void i40evf_init_task(struct work_struct
*work
)
2311 struct i40evf_adapter
*adapter
= container_of(work
,
2312 struct i40evf_adapter
,
2314 struct net_device
*netdev
= adapter
->netdev
;
2315 struct i40e_hw
*hw
= &adapter
->hw
;
2316 struct pci_dev
*pdev
= adapter
->pdev
;
2319 switch (adapter
->state
) {
2320 case __I40EVF_STARTUP
:
2321 /* driver loaded, probe complete */
2322 adapter
->flags
&= ~I40EVF_FLAG_PF_COMMS_FAILED
;
2323 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
2324 err
= i40e_set_mac_type(hw
);
2326 dev_err(&pdev
->dev
, "Failed to set MAC type (%d)\n",
2330 err
= i40evf_check_reset_complete(hw
);
2332 dev_info(&pdev
->dev
, "Device is still in reset (%d), retrying\n",
2336 hw
->aq
.num_arq_entries
= I40EVF_AQ_LEN
;
2337 hw
->aq
.num_asq_entries
= I40EVF_AQ_LEN
;
2338 hw
->aq
.arq_buf_size
= I40EVF_MAX_AQ_BUF_SIZE
;
2339 hw
->aq
.asq_buf_size
= I40EVF_MAX_AQ_BUF_SIZE
;
2341 err
= i40evf_init_adminq(hw
);
2343 dev_err(&pdev
->dev
, "Failed to init Admin Queue (%d)\n",
2347 err
= i40evf_send_api_ver(adapter
);
2349 dev_err(&pdev
->dev
, "Unable to send to PF (%d)\n", err
);
2350 i40evf_shutdown_adminq(hw
);
2353 adapter
->state
= __I40EVF_INIT_VERSION_CHECK
;
2355 case __I40EVF_INIT_VERSION_CHECK
:
2356 if (!i40evf_asq_done(hw
)) {
2357 dev_err(&pdev
->dev
, "Admin queue command never completed\n");
2358 i40evf_shutdown_adminq(hw
);
2359 adapter
->state
= __I40EVF_STARTUP
;
2363 /* aq msg sent, awaiting reply */
2364 err
= i40evf_verify_api_ver(adapter
);
2366 if (err
== I40E_ERR_ADMIN_QUEUE_NO_WORK
)
2367 err
= i40evf_send_api_ver(adapter
);
2369 dev_err(&pdev
->dev
, "Unsupported PF API version %d.%d, expected %d.%d\n",
2370 adapter
->pf_version
.major
,
2371 adapter
->pf_version
.minor
,
2372 I40E_VIRTCHNL_VERSION_MAJOR
,
2373 I40E_VIRTCHNL_VERSION_MINOR
);
2376 err
= i40evf_send_vf_config_msg(adapter
);
2378 dev_err(&pdev
->dev
, "Unable to send config request (%d)\n",
2382 adapter
->state
= __I40EVF_INIT_GET_RESOURCES
;
2384 case __I40EVF_INIT_GET_RESOURCES
:
2385 /* aq msg sent, awaiting reply */
2386 if (!adapter
->vf_res
) {
2387 bufsz
= sizeof(struct i40e_virtchnl_vf_resource
) +
2389 sizeof(struct i40e_virtchnl_vsi_resource
));
2390 adapter
->vf_res
= kzalloc(bufsz
, GFP_KERNEL
);
2391 if (!adapter
->vf_res
)
2394 err
= i40evf_get_vf_config(adapter
);
2395 if (err
== I40E_ERR_ADMIN_QUEUE_NO_WORK
) {
2396 err
= i40evf_send_vf_config_msg(adapter
);
2398 } else if (err
== I40E_ERR_PARAM
) {
2399 /* We only get ERR_PARAM if the device is in a very bad
2400 * state or if we've been disabled for previous bad
2401 * behavior. Either way, we're done now.
2403 i40evf_shutdown_adminq(hw
);
2404 dev_err(&pdev
->dev
, "Unable to get VF config due to PF error condition, not retrying\n");
2408 dev_err(&pdev
->dev
, "Unable to get VF config (%d)\n",
2412 adapter
->state
= __I40EVF_INIT_SW
;
2418 if (hw
->mac
.type
== I40E_MAC_X722_VF
)
2419 adapter
->flags
|= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
;
2421 if (i40evf_process_config(adapter
))
2423 adapter
->current_op
= I40E_VIRTCHNL_OP_UNKNOWN
;
2425 adapter
->flags
|= I40EVF_FLAG_RX_CSUM_ENABLED
;
2426 adapter
->flags
|= I40EVF_FLAG_RX_1BUF_CAPABLE
;
2427 adapter
->flags
|= I40EVF_FLAG_RX_PS_CAPABLE
;
2429 /* Default to single buffer rx, can be changed through ethtool. */
2430 adapter
->flags
&= ~I40EVF_FLAG_RX_PS_ENABLED
;
2432 netdev
->netdev_ops
= &i40evf_netdev_ops
;
2433 i40evf_set_ethtool_ops(netdev
);
2434 netdev
->watchdog_timeo
= 5 * HZ
;
2436 if (!is_valid_ether_addr(adapter
->hw
.mac
.addr
)) {
2437 dev_info(&pdev
->dev
, "Invalid MAC address %pM, using random\n",
2438 adapter
->hw
.mac
.addr
);
2439 eth_hw_addr_random(netdev
);
2440 ether_addr_copy(adapter
->hw
.mac
.addr
, netdev
->dev_addr
);
2442 adapter
->flags
|= I40EVF_FLAG_ADDR_SET_BY_PF
;
2443 ether_addr_copy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
);
2444 ether_addr_copy(netdev
->perm_addr
, adapter
->hw
.mac
.addr
);
2447 init_timer(&adapter
->watchdog_timer
);
2448 adapter
->watchdog_timer
.function
= &i40evf_watchdog_timer
;
2449 adapter
->watchdog_timer
.data
= (unsigned long)adapter
;
2450 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 1);
2452 adapter
->num_active_queues
= min_t(int,
2453 adapter
->vsi_res
->num_queue_pairs
,
2454 (int)(num_online_cpus()));
2455 adapter
->tx_desc_count
= I40EVF_DEFAULT_TXD
;
2456 adapter
->rx_desc_count
= I40EVF_DEFAULT_RXD
;
2457 err
= i40evf_init_interrupt_scheme(adapter
);
2460 i40evf_map_rings_to_vectors(adapter
);
2461 if (adapter
->vf_res
->vf_offload_flags
&
2462 I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
)
2463 adapter
->flags
|= I40EVF_FLAG_WB_ON_ITR_CAPABLE
;
2465 err
= i40evf_request_misc_irq(adapter
);
2469 netif_carrier_off(netdev
);
2471 if (!adapter
->netdev_registered
) {
2472 err
= register_netdev(netdev
);
2477 adapter
->netdev_registered
= true;
2479 netif_tx_stop_all_queues(netdev
);
2481 dev_info(&pdev
->dev
, "MAC address: %pM\n", adapter
->hw
.mac
.addr
);
2482 if (netdev
->features
& NETIF_F_GRO
)
2483 dev_info(&pdev
->dev
, "GRO is enabled\n");
2485 adapter
->state
= __I40EVF_DOWN
;
2486 set_bit(__I40E_DOWN
, &adapter
->vsi
.state
);
2487 i40evf_misc_irq_enable(adapter
);
2489 adapter
->rss_key
= kzalloc(adapter
->rss_key_size
, GFP_KERNEL
);
2490 adapter
->rss_lut
= kzalloc(adapter
->rss_lut_size
, GFP_KERNEL
);
2491 if (!adapter
->rss_key
|| !adapter
->rss_lut
)
2494 if (RSS_AQ(adapter
)) {
2495 adapter
->aq_required
|= I40EVF_FLAG_AQ_CONFIGURE_RSS
;
2496 mod_timer_pending(&adapter
->watchdog_timer
, jiffies
+ 1);
2498 i40evf_init_rss(adapter
);
2502 schedule_delayed_work(&adapter
->init_task
, msecs_to_jiffies(30));
2505 i40evf_free_rss(adapter
);
2507 i40evf_free_misc_irq(adapter
);
2509 i40evf_reset_interrupt_capability(adapter
);
2511 kfree(adapter
->vf_res
);
2512 adapter
->vf_res
= NULL
;
2514 /* Things went into the weeds, so try again later */
2515 if (++adapter
->aq_wait_count
> I40EVF_AQ_MAX_ERR
) {
2516 dev_err(&pdev
->dev
, "Failed to communicate with PF; waiting before retry\n");
2517 adapter
->flags
|= I40EVF_FLAG_PF_COMMS_FAILED
;
2518 i40evf_shutdown_adminq(hw
);
2519 adapter
->state
= __I40EVF_STARTUP
;
2520 schedule_delayed_work(&adapter
->init_task
, HZ
* 5);
2523 schedule_delayed_work(&adapter
->init_task
, HZ
);
2527 * i40evf_shutdown - Shutdown the device in preparation for a reboot
2528 * @pdev: pci device structure
2530 static void i40evf_shutdown(struct pci_dev
*pdev
)
2532 struct net_device
*netdev
= pci_get_drvdata(pdev
);
2533 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2535 netif_device_detach(netdev
);
2537 if (netif_running(netdev
))
2538 i40evf_close(netdev
);
2540 /* Prevent the watchdog from running. */
2541 adapter
->state
= __I40EVF_REMOVE
;
2542 adapter
->aq_required
= 0;
2545 pci_save_state(pdev
);
2548 pci_disable_device(pdev
);
2552 * i40evf_probe - Device Initialization Routine
2553 * @pdev: PCI device information struct
2554 * @ent: entry in i40evf_pci_tbl
2556 * Returns 0 on success, negative on failure
2558 * i40evf_probe initializes an adapter identified by a pci_dev structure.
2559 * The OS initialization, configuring of the adapter private structure,
2560 * and a hardware reset occur.
2562 static int i40evf_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
2564 struct net_device
*netdev
;
2565 struct i40evf_adapter
*adapter
= NULL
;
2566 struct i40e_hw
*hw
= NULL
;
2569 err
= pci_enable_device(pdev
);
2573 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
2575 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
2578 "DMA configuration failed: 0x%x\n", err
);
2583 err
= pci_request_regions(pdev
, i40evf_driver_name
);
2586 "pci_request_regions failed 0x%x\n", err
);
2590 pci_enable_pcie_error_reporting(pdev
);
2592 pci_set_master(pdev
);
2594 netdev
= alloc_etherdev_mq(sizeof(struct i40evf_adapter
), MAX_QUEUES
);
2597 goto err_alloc_etherdev
;
2600 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
2602 pci_set_drvdata(pdev
, netdev
);
2603 adapter
= netdev_priv(netdev
);
2605 adapter
->netdev
= netdev
;
2606 adapter
->pdev
= pdev
;
2611 adapter
->msg_enable
= BIT(DEFAULT_DEBUG_LEVEL_SHIFT
) - 1;
2612 adapter
->state
= __I40EVF_STARTUP
;
2614 /* Call save state here because it relies on the adapter struct. */
2615 pci_save_state(pdev
);
2617 hw
->hw_addr
= ioremap(pci_resource_start(pdev
, 0),
2618 pci_resource_len(pdev
, 0));
2623 hw
->vendor_id
= pdev
->vendor
;
2624 hw
->device_id
= pdev
->device
;
2625 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &hw
->revision_id
);
2626 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
2627 hw
->subsystem_device_id
= pdev
->subsystem_device
;
2628 hw
->bus
.device
= PCI_SLOT(pdev
->devfn
);
2629 hw
->bus
.func
= PCI_FUNC(pdev
->devfn
);
2631 /* set up the locks for the AQ, do this only once in probe
2632 * and destroy them only once in remove
2634 mutex_init(&hw
->aq
.asq_mutex
);
2635 mutex_init(&hw
->aq
.arq_mutex
);
2637 INIT_LIST_HEAD(&adapter
->mac_filter_list
);
2638 INIT_LIST_HEAD(&adapter
->vlan_filter_list
);
2640 INIT_WORK(&adapter
->reset_task
, i40evf_reset_task
);
2641 INIT_WORK(&adapter
->adminq_task
, i40evf_adminq_task
);
2642 INIT_WORK(&adapter
->watchdog_task
, i40evf_watchdog_task
);
2643 INIT_DELAYED_WORK(&adapter
->init_task
, i40evf_init_task
);
2644 schedule_delayed_work(&adapter
->init_task
,
2645 msecs_to_jiffies(5 * (pdev
->devfn
& 0x07)));
2650 free_netdev(netdev
);
2652 pci_release_regions(pdev
);
2655 pci_disable_device(pdev
);
2661 * i40evf_suspend - Power management suspend routine
2662 * @pdev: PCI device information struct
2665 * Called when the system (VM) is entering sleep/suspend.
2667 static int i40evf_suspend(struct pci_dev
*pdev
, pm_message_t state
)
2669 struct net_device
*netdev
= pci_get_drvdata(pdev
);
2670 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2673 netif_device_detach(netdev
);
2675 if (netif_running(netdev
)) {
2677 i40evf_down(adapter
);
2680 i40evf_free_misc_irq(adapter
);
2681 i40evf_reset_interrupt_capability(adapter
);
2683 retval
= pci_save_state(pdev
);
2687 pci_disable_device(pdev
);
2693 * i40evf_resume - Power management resume routine
2694 * @pdev: PCI device information struct
2696 * Called when the system (VM) is resumed from sleep/suspend.
2698 static int i40evf_resume(struct pci_dev
*pdev
)
2700 struct i40evf_adapter
*adapter
= pci_get_drvdata(pdev
);
2701 struct net_device
*netdev
= adapter
->netdev
;
2704 pci_set_power_state(pdev
, PCI_D0
);
2705 pci_restore_state(pdev
);
2706 /* pci_restore_state clears dev->state_saved so call
2707 * pci_save_state to restore it.
2709 pci_save_state(pdev
);
2711 err
= pci_enable_device_mem(pdev
);
2713 dev_err(&pdev
->dev
, "Cannot enable PCI device from suspend.\n");
2716 pci_set_master(pdev
);
2719 err
= i40evf_set_interrupt_capability(adapter
);
2722 dev_err(&pdev
->dev
, "Cannot enable MSI-X interrupts.\n");
2725 err
= i40evf_request_misc_irq(adapter
);
2728 dev_err(&pdev
->dev
, "Cannot get interrupt vector.\n");
2732 schedule_work(&adapter
->reset_task
);
2734 netif_device_attach(netdev
);
2739 #endif /* CONFIG_PM */
2741 * i40evf_remove - Device Removal Routine
2742 * @pdev: PCI device information struct
2744 * i40evf_remove is called by the PCI subsystem to alert the driver
2745 * that it should release a PCI device. The could be caused by a
2746 * Hot-Plug event, or because the driver is going to be removed from
2749 static void i40evf_remove(struct pci_dev
*pdev
)
2751 struct net_device
*netdev
= pci_get_drvdata(pdev
);
2752 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2753 struct i40evf_mac_filter
*f
, *ftmp
;
2754 struct i40e_hw
*hw
= &adapter
->hw
;
2756 cancel_delayed_work_sync(&adapter
->init_task
);
2757 cancel_work_sync(&adapter
->reset_task
);
2759 if (adapter
->netdev_registered
) {
2760 unregister_netdev(netdev
);
2761 adapter
->netdev_registered
= false;
2764 /* Shut down all the garbage mashers on the detention level */
2765 adapter
->state
= __I40EVF_REMOVE
;
2766 adapter
->aq_required
= 0;
2767 i40evf_request_reset(adapter
);
2769 /* If the FW isn't responding, kick it once, but only once. */
2770 if (!i40evf_asq_done(hw
)) {
2771 i40evf_request_reset(adapter
);
2775 if (adapter
->msix_entries
) {
2776 i40evf_misc_irq_disable(adapter
);
2777 i40evf_free_misc_irq(adapter
);
2778 i40evf_reset_interrupt_capability(adapter
);
2779 i40evf_free_q_vectors(adapter
);
2782 if (adapter
->watchdog_timer
.function
)
2783 del_timer_sync(&adapter
->watchdog_timer
);
2785 flush_scheduled_work();
2787 i40evf_free_rss(adapter
);
2789 if (hw
->aq
.asq
.count
)
2790 i40evf_shutdown_adminq(hw
);
2792 /* destroy the locks only once, here */
2793 mutex_destroy(&hw
->aq
.arq_mutex
);
2794 mutex_destroy(&hw
->aq
.asq_mutex
);
2796 iounmap(hw
->hw_addr
);
2797 pci_release_regions(pdev
);
2799 i40evf_free_all_tx_resources(adapter
);
2800 i40evf_free_all_rx_resources(adapter
);
2801 i40evf_free_queues(adapter
);
2802 kfree(adapter
->vf_res
);
2803 /* If we got removed before an up/down sequence, we've got a filter
2804 * hanging out there that we need to get rid of.
2806 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
, list
) {
2810 list_for_each_entry_safe(f
, ftmp
, &adapter
->vlan_filter_list
, list
) {
2815 free_netdev(netdev
);
2817 pci_disable_pcie_error_reporting(pdev
);
2819 pci_disable_device(pdev
);
2822 static struct pci_driver i40evf_driver
= {
2823 .name
= i40evf_driver_name
,
2824 .id_table
= i40evf_pci_tbl
,
2825 .probe
= i40evf_probe
,
2826 .remove
= i40evf_remove
,
2828 .suspend
= i40evf_suspend
,
2829 .resume
= i40evf_resume
,
2831 .shutdown
= i40evf_shutdown
,
2835 * i40e_init_module - Driver Registration Routine
2837 * i40e_init_module is the first routine called when the driver is
2838 * loaded. All it does is register with the PCI subsystem.
2840 static int __init
i40evf_init_module(void)
2844 pr_info("i40evf: %s - version %s\n", i40evf_driver_string
,
2845 i40evf_driver_version
);
2847 pr_info("%s\n", i40evf_copyright
);
2849 i40evf_wq
= create_singlethread_workqueue(i40evf_driver_name
);
2851 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name
);
2854 ret
= pci_register_driver(&i40evf_driver
);
2858 module_init(i40evf_init_module
);
2861 * i40e_exit_module - Driver Exit Cleanup Routine
2863 * i40e_exit_module is called just before the driver is removed
2866 static void __exit
i40evf_exit_module(void)
2868 pci_unregister_driver(&i40evf_driver
);
2869 destroy_workqueue(i40evf_wq
);
2872 module_exit(i40evf_exit_module
);