1 /* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 #include <linux/module.h>
22 #include <linux/aer.h>
26 static const struct fm10k_info
*fm10k_info_tbl
[] = {
27 [fm10k_device_pf
] = &fm10k_pf_info
,
28 [fm10k_device_vf
] = &fm10k_vf_info
,
32 * fm10k_pci_tbl - PCI Device ID Table
34 * Wildcard entries (PCI_ANY_ID) should come last
35 * Last entry must be all 0s
37 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
38 * Class, Class Mask, private data (not used) }
40 static const struct pci_device_id fm10k_pci_tbl
[] = {
41 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_PF
), fm10k_device_pf
},
42 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_VF
), fm10k_device_vf
},
43 /* required last entry */
46 MODULE_DEVICE_TABLE(pci
, fm10k_pci_tbl
);
48 u16
fm10k_read_pci_cfg_word(struct fm10k_hw
*hw
, u32 reg
)
50 struct fm10k_intfc
*interface
= hw
->back
;
53 if (FM10K_REMOVED(hw
->hw_addr
))
56 pci_read_config_word(interface
->pdev
, reg
, &value
);
58 fm10k_write_flush(hw
);
63 u32
fm10k_read_reg(struct fm10k_hw
*hw
, int reg
)
65 u32 __iomem
*hw_addr
= ACCESS_ONCE(hw
->hw_addr
);
68 if (FM10K_REMOVED(hw_addr
))
71 value
= readl(&hw_addr
[reg
]);
72 if (!(~value
) && (!reg
|| !(~readl(hw_addr
)))) {
73 struct fm10k_intfc
*interface
= hw
->back
;
74 struct net_device
*netdev
= interface
->netdev
;
77 netif_device_detach(netdev
);
78 netdev_err(netdev
, "PCIe link lost, device now detached\n");
84 static int fm10k_hw_ready(struct fm10k_intfc
*interface
)
86 struct fm10k_hw
*hw
= &interface
->hw
;
88 fm10k_write_flush(hw
);
90 return FM10K_REMOVED(hw
->hw_addr
) ? -ENODEV
: 0;
93 void fm10k_service_event_schedule(struct fm10k_intfc
*interface
)
95 if (!test_bit(__FM10K_SERVICE_DISABLE
, &interface
->state
) &&
96 !test_and_set_bit(__FM10K_SERVICE_SCHED
, &interface
->state
))
97 schedule_work(&interface
->service_task
);
100 static void fm10k_service_event_complete(struct fm10k_intfc
*interface
)
102 BUG_ON(!test_bit(__FM10K_SERVICE_SCHED
, &interface
->state
));
104 /* flush memory to make sure state is correct before next watchog */
105 smp_mb__before_atomic();
106 clear_bit(__FM10K_SERVICE_SCHED
, &interface
->state
);
110 * fm10k_service_timer - Timer Call-back
111 * @data: pointer to interface cast into an unsigned long
113 static void fm10k_service_timer(unsigned long data
)
115 struct fm10k_intfc
*interface
= (struct fm10k_intfc
*)data
;
117 /* Reset the timer */
118 mod_timer(&interface
->service_timer
, (HZ
* 2) + jiffies
);
120 fm10k_service_event_schedule(interface
);
123 static void fm10k_detach_subtask(struct fm10k_intfc
*interface
)
125 struct net_device
*netdev
= interface
->netdev
;
127 /* do nothing if device is still present or hw_addr is set */
128 if (netif_device_present(netdev
) || interface
->hw
.hw_addr
)
133 if (netif_running(netdev
))
139 static void fm10k_reinit(struct fm10k_intfc
*interface
)
141 struct net_device
*netdev
= interface
->netdev
;
142 struct fm10k_hw
*hw
= &interface
->hw
;
145 WARN_ON(in_interrupt());
147 /* put off any impending NetWatchDogTimeout */
148 netdev
->trans_start
= jiffies
;
150 while (test_and_set_bit(__FM10K_RESETTING
, &interface
->state
))
151 usleep_range(1000, 2000);
155 fm10k_iov_suspend(interface
->pdev
);
157 if (netif_running(netdev
))
160 fm10k_mbx_free_irq(interface
);
162 /* delay any future reset requests */
163 interface
->last_reset
= jiffies
+ (10 * HZ
);
165 /* reset and initialize the hardware so it is in a known state */
166 err
= hw
->mac
.ops
.reset_hw(hw
) ? : hw
->mac
.ops
.init_hw(hw
);
168 dev_err(&interface
->pdev
->dev
, "init_hw failed: %d\n", err
);
170 /* reassociate interrupts */
171 fm10k_mbx_request_irq(interface
);
174 fm10k_ts_reset(interface
);
176 if (netif_running(netdev
))
179 fm10k_iov_resume(interface
->pdev
);
183 clear_bit(__FM10K_RESETTING
, &interface
->state
);
186 static void fm10k_reset_subtask(struct fm10k_intfc
*interface
)
188 if (!(interface
->flags
& FM10K_FLAG_RESET_REQUESTED
))
191 interface
->flags
&= ~FM10K_FLAG_RESET_REQUESTED
;
193 netdev_err(interface
->netdev
, "Reset interface\n");
194 interface
->tx_timeout_count
++;
196 fm10k_reinit(interface
);
200 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
201 * @interface: board private structure
203 * Configure the SWPRI to PC mapping for the port.
205 static void fm10k_configure_swpri_map(struct fm10k_intfc
*interface
)
207 struct net_device
*netdev
= interface
->netdev
;
208 struct fm10k_hw
*hw
= &interface
->hw
;
211 /* clear flag indicating update is needed */
212 interface
->flags
&= ~FM10K_FLAG_SWPRI_CONFIG
;
214 /* these registers are only available on the PF */
215 if (hw
->mac
.type
!= fm10k_mac_pf
)
218 /* configure SWPRI to PC map */
219 for (i
= 0; i
< FM10K_SWPRI_MAX
; i
++)
220 fm10k_write_reg(hw
, FM10K_SWPRI_MAP(i
),
221 netdev_get_prio_tc_map(netdev
, i
));
225 * fm10k_watchdog_update_host_state - Update the link status based on host.
226 * @interface: board private structure
228 static void fm10k_watchdog_update_host_state(struct fm10k_intfc
*interface
)
230 struct fm10k_hw
*hw
= &interface
->hw
;
233 if (test_bit(__FM10K_LINK_DOWN
, &interface
->state
)) {
234 interface
->host_ready
= false;
235 if (time_is_after_jiffies(interface
->link_down_event
))
237 clear_bit(__FM10K_LINK_DOWN
, &interface
->state
);
240 if (interface
->flags
& FM10K_FLAG_SWPRI_CONFIG
) {
241 if (rtnl_trylock()) {
242 fm10k_configure_swpri_map(interface
);
247 /* lock the mailbox for transmit and receive */
248 fm10k_mbx_lock(interface
);
250 err
= hw
->mac
.ops
.get_host_state(hw
, &interface
->host_ready
);
251 if (err
&& time_is_before_jiffies(interface
->last_reset
))
252 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
255 fm10k_mbx_unlock(interface
);
/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 * It is necessary for us to hold the rtnl_lock while doing this as the
 * mailbox accesses are protected by this lock.
 */
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}
276 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
277 * @interface: board private structure
279 static void fm10k_watchdog_host_is_ready(struct fm10k_intfc
*interface
)
281 struct net_device
*netdev
= interface
->netdev
;
283 /* only continue if link state is currently down */
284 if (netif_carrier_ok(netdev
))
287 netif_info(interface
, drv
, netdev
, "NIC Link is up\n");
289 netif_carrier_on(netdev
);
290 netif_tx_wake_all_queues(netdev
);
294 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
295 * @interface: board private structure
297 static void fm10k_watchdog_host_not_ready(struct fm10k_intfc
*interface
)
299 struct net_device
*netdev
= interface
->netdev
;
301 /* only continue if link state is currently up */
302 if (!netif_carrier_ok(netdev
))
305 netif_info(interface
, drv
, netdev
, "NIC Link is down\n");
307 netif_carrier_off(netdev
);
308 netif_tx_stop_all_queues(netdev
);
312 * fm10k_update_stats - Update the board statistics counters.
313 * @interface: board private structure
315 void fm10k_update_stats(struct fm10k_intfc
*interface
)
317 struct net_device_stats
*net_stats
= &interface
->netdev
->stats
;
318 struct fm10k_hw
*hw
= &interface
->hw
;
319 u64 rx_errors
= 0, rx_csum_errors
= 0, tx_csum_errors
= 0;
320 u64 restart_queue
= 0, tx_busy
= 0, alloc_failed
= 0;
321 u64 rx_bytes_nic
= 0, rx_pkts_nic
= 0, rx_drops_nic
= 0;
322 u64 tx_bytes_nic
= 0, tx_pkts_nic
= 0;
326 /* do not allow stats update via service task for next second */
327 interface
->next_stats_update
= jiffies
+ HZ
;
329 /* gather some stats to the interface struct that are per queue */
330 for (bytes
= 0, pkts
= 0, i
= 0; i
< interface
->num_tx_queues
; i
++) {
331 struct fm10k_ring
*tx_ring
= interface
->tx_ring
[i
];
333 restart_queue
+= tx_ring
->tx_stats
.restart_queue
;
334 tx_busy
+= tx_ring
->tx_stats
.tx_busy
;
335 tx_csum_errors
+= tx_ring
->tx_stats
.csum_err
;
336 bytes
+= tx_ring
->stats
.bytes
;
337 pkts
+= tx_ring
->stats
.packets
;
340 interface
->restart_queue
= restart_queue
;
341 interface
->tx_busy
= tx_busy
;
342 net_stats
->tx_bytes
= bytes
;
343 net_stats
->tx_packets
= pkts
;
344 interface
->tx_csum_errors
= tx_csum_errors
;
345 /* gather some stats to the interface struct that are per queue */
346 for (bytes
= 0, pkts
= 0, i
= 0; i
< interface
->num_rx_queues
; i
++) {
347 struct fm10k_ring
*rx_ring
= interface
->rx_ring
[i
];
349 bytes
+= rx_ring
->stats
.bytes
;
350 pkts
+= rx_ring
->stats
.packets
;
351 alloc_failed
+= rx_ring
->rx_stats
.alloc_failed
;
352 rx_csum_errors
+= rx_ring
->rx_stats
.csum_err
;
353 rx_errors
+= rx_ring
->rx_stats
.errors
;
356 net_stats
->rx_bytes
= bytes
;
357 net_stats
->rx_packets
= pkts
;
358 interface
->alloc_failed
= alloc_failed
;
359 interface
->rx_csum_errors
= rx_csum_errors
;
360 interface
->rx_errors
= rx_errors
;
362 hw
->mac
.ops
.update_hw_stats(hw
, &interface
->stats
);
364 for (i
= 0; i
< FM10K_MAX_QUEUES_PF
; i
++) {
365 struct fm10k_hw_stats_q
*q
= &interface
->stats
.q
[i
];
367 tx_bytes_nic
+= q
->tx_bytes
.count
;
368 tx_pkts_nic
+= q
->tx_packets
.count
;
369 rx_bytes_nic
+= q
->rx_bytes
.count
;
370 rx_pkts_nic
+= q
->rx_packets
.count
;
371 rx_drops_nic
+= q
->rx_drops
.count
;
374 interface
->tx_bytes_nic
= tx_bytes_nic
;
375 interface
->tx_packets_nic
= tx_pkts_nic
;
376 interface
->rx_bytes_nic
= rx_bytes_nic
;
377 interface
->rx_packets_nic
= rx_pkts_nic
;
378 interface
->rx_drops_nic
= rx_drops_nic
;
380 /* Fill out the OS statistics structure */
381 net_stats
->rx_errors
= interface
->stats
.xec
.count
;
382 net_stats
->rx_dropped
= interface
->stats
.nodesc_drop
.count
;
386 * fm10k_watchdog_flush_tx - flush queues on host not ready
387 * @interface - pointer to the device interface structure
389 static void fm10k_watchdog_flush_tx(struct fm10k_intfc
*interface
)
391 int some_tx_pending
= 0;
394 /* nothing to do if carrier is up */
395 if (netif_carrier_ok(interface
->netdev
))
398 for (i
= 0; i
< interface
->num_tx_queues
; i
++) {
399 struct fm10k_ring
*tx_ring
= interface
->tx_ring
[i
];
401 if (tx_ring
->next_to_use
!= tx_ring
->next_to_clean
) {
407 /* We've lost link, so the controller stops DMA, but we've got
408 * queued Tx work that's never going to get done, so reset
409 * controller to flush Tx.
412 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
416 * fm10k_watchdog_subtask - check and bring link up
417 * @interface - pointer to the device interface structure
419 static void fm10k_watchdog_subtask(struct fm10k_intfc
*interface
)
421 /* if interface is down do nothing */
422 if (test_bit(__FM10K_DOWN
, &interface
->state
) ||
423 test_bit(__FM10K_RESETTING
, &interface
->state
))
426 if (interface
->host_ready
)
427 fm10k_watchdog_host_is_ready(interface
);
429 fm10k_watchdog_host_not_ready(interface
);
431 /* update stats only once every second */
432 if (time_is_before_jiffies(interface
->next_stats_update
))
433 fm10k_update_stats(interface
);
435 /* flush any uncompleted work */
436 fm10k_watchdog_flush_tx(interface
);
440 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
441 * @interface - pointer to the device interface structure
443 * This function serves two purposes. First it strobes the interrupt lines
444 * in order to make certain interrupts are occurring. Secondly it sets the
445 * bits needed to check for TX hangs. As a result we should immediately
446 * determine if a hang has occurred.
448 static void fm10k_check_hang_subtask(struct fm10k_intfc
*interface
)
452 /* If we're down or resetting, just bail */
453 if (test_bit(__FM10K_DOWN
, &interface
->state
) ||
454 test_bit(__FM10K_RESETTING
, &interface
->state
))
457 /* rate limit tx hang checks to only once every 2 seconds */
458 if (time_is_after_eq_jiffies(interface
->next_tx_hang_check
))
460 interface
->next_tx_hang_check
= jiffies
+ (2 * HZ
);
462 if (netif_carrier_ok(interface
->netdev
)) {
463 /* Force detection of hung controller */
464 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
465 set_check_for_tx_hang(interface
->tx_ring
[i
]);
467 /* Rearm all in-use q_vectors for immediate firing */
468 for (i
= 0; i
< interface
->num_q_vectors
; i
++) {
469 struct fm10k_q_vector
*qv
= interface
->q_vector
[i
];
471 if (!qv
->tx
.count
&& !qv
->rx
.count
)
473 writel(FM10K_ITR_ENABLE
| FM10K_ITR_PENDING2
, qv
->itr
);
479 * fm10k_service_task - manages and runs subtasks
480 * @work: pointer to work_struct containing our data
482 static void fm10k_service_task(struct work_struct
*work
)
484 struct fm10k_intfc
*interface
;
486 interface
= container_of(work
, struct fm10k_intfc
, service_task
);
488 /* tasks always capable of running, but must be rtnl protected */
489 fm10k_mbx_subtask(interface
);
490 fm10k_detach_subtask(interface
);
491 fm10k_reset_subtask(interface
);
493 /* tasks only run when interface is up */
494 fm10k_watchdog_subtask(interface
);
495 fm10k_check_hang_subtask(interface
);
496 fm10k_ts_tx_subtask(interface
);
498 /* release lock on service events to allow scheduling next event */
499 fm10k_service_event_complete(interface
);
503 * fm10k_configure_tx_ring - Configure Tx ring after Reset
504 * @interface: board private structure
505 * @ring: structure containing ring specific data
507 * Configure the Tx descriptor ring after a reset.
509 static void fm10k_configure_tx_ring(struct fm10k_intfc
*interface
,
510 struct fm10k_ring
*ring
)
512 struct fm10k_hw
*hw
= &interface
->hw
;
513 u64 tdba
= ring
->dma
;
514 u32 size
= ring
->count
* sizeof(struct fm10k_tx_desc
);
515 u32 txint
= FM10K_INT_MAP_DISABLE
;
516 u32 txdctl
= FM10K_TXDCTL_ENABLE
| (1 << FM10K_TXDCTL_MAX_TIME_SHIFT
);
517 u8 reg_idx
= ring
->reg_idx
;
519 /* disable queue to avoid issues while updating state */
520 fm10k_write_reg(hw
, FM10K_TXDCTL(reg_idx
), 0);
521 fm10k_write_flush(hw
);
523 /* possible poll here to verify ring resources have been cleaned */
525 /* set location and size for descriptor ring */
526 fm10k_write_reg(hw
, FM10K_TDBAL(reg_idx
), tdba
& DMA_BIT_MASK(32));
527 fm10k_write_reg(hw
, FM10K_TDBAH(reg_idx
), tdba
>> 32);
528 fm10k_write_reg(hw
, FM10K_TDLEN(reg_idx
), size
);
530 /* reset head and tail pointers */
531 fm10k_write_reg(hw
, FM10K_TDH(reg_idx
), 0);
532 fm10k_write_reg(hw
, FM10K_TDT(reg_idx
), 0);
534 /* store tail pointer */
535 ring
->tail
= &interface
->uc_addr
[FM10K_TDT(reg_idx
)];
537 /* reset ntu and ntc to place SW in sync with hardwdare */
538 ring
->next_to_clean
= 0;
539 ring
->next_to_use
= 0;
542 if (ring
->q_vector
) {
543 txint
= ring
->q_vector
->v_idx
+ NON_Q_VECTORS(hw
);
544 txint
|= FM10K_INT_MAP_TIMER0
;
547 fm10k_write_reg(hw
, FM10K_TXINT(reg_idx
), txint
);
549 /* enable use of FTAG bit in Tx descriptor, register is RO for VF */
550 fm10k_write_reg(hw
, FM10K_PFVTCTL(reg_idx
),
551 FM10K_PFVTCTL_FTAG_DESC_ENABLE
);
554 fm10k_write_reg(hw
, FM10K_TXDCTL(reg_idx
), txdctl
);
558 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
559 * @interface: board private structure
560 * @ring: structure containing ring specific data
562 * Verify the Tx descriptor ring is ready for transmit.
564 static void fm10k_enable_tx_ring(struct fm10k_intfc
*interface
,
565 struct fm10k_ring
*ring
)
567 struct fm10k_hw
*hw
= &interface
->hw
;
570 u8 reg_idx
= ring
->reg_idx
;
572 /* if we are already enabled just exit */
573 if (fm10k_read_reg(hw
, FM10K_TXDCTL(reg_idx
)) & FM10K_TXDCTL_ENABLE
)
576 /* poll to verify queue is enabled */
578 usleep_range(1000, 2000);
579 txdctl
= fm10k_read_reg(hw
, FM10K_TXDCTL(reg_idx
));
580 } while (!(txdctl
& FM10K_TXDCTL_ENABLE
) && --wait_loop
);
582 netif_err(interface
, drv
, interface
->netdev
,
583 "Could not enable Tx Queue %d\n", reg_idx
);
587 * fm10k_configure_tx - Configure Transmit Unit after Reset
588 * @interface: board private structure
590 * Configure the Tx unit of the MAC after a reset.
592 static void fm10k_configure_tx(struct fm10k_intfc
*interface
)
596 /* Setup the HW Tx Head and Tail descriptor pointers */
597 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
598 fm10k_configure_tx_ring(interface
, interface
->tx_ring
[i
]);
600 /* poll here to verify that Tx rings are now enabled */
601 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
602 fm10k_enable_tx_ring(interface
, interface
->tx_ring
[i
]);
606 * fm10k_configure_rx_ring - Configure Rx ring after Reset
607 * @interface: board private structure
608 * @ring: structure containing ring specific data
610 * Configure the Rx descriptor ring after a reset.
612 static void fm10k_configure_rx_ring(struct fm10k_intfc
*interface
,
613 struct fm10k_ring
*ring
)
615 u64 rdba
= ring
->dma
;
616 struct fm10k_hw
*hw
= &interface
->hw
;
617 u32 size
= ring
->count
* sizeof(union fm10k_rx_desc
);
618 u32 rxqctl
= FM10K_RXQCTL_ENABLE
| FM10K_RXQCTL_PF
;
619 u32 rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
620 u32 srrctl
= FM10K_SRRCTL_BUFFER_CHAINING_EN
;
621 u32 rxint
= FM10K_INT_MAP_DISABLE
;
622 u8 rx_pause
= interface
->rx_pause
;
623 u8 reg_idx
= ring
->reg_idx
;
625 /* disable queue to avoid issues while updating state */
626 fm10k_write_reg(hw
, FM10K_RXQCTL(reg_idx
), 0);
627 fm10k_write_flush(hw
);
629 /* possible poll here to verify ring resources have been cleaned */
631 /* set location and size for descriptor ring */
632 fm10k_write_reg(hw
, FM10K_RDBAL(reg_idx
), rdba
& DMA_BIT_MASK(32));
633 fm10k_write_reg(hw
, FM10K_RDBAH(reg_idx
), rdba
>> 32);
634 fm10k_write_reg(hw
, FM10K_RDLEN(reg_idx
), size
);
636 /* reset head and tail pointers */
637 fm10k_write_reg(hw
, FM10K_RDH(reg_idx
), 0);
638 fm10k_write_reg(hw
, FM10K_RDT(reg_idx
), 0);
640 /* store tail pointer */
641 ring
->tail
= &interface
->uc_addr
[FM10K_RDT(reg_idx
)];
643 /* reset ntu and ntc to place SW in sync with hardwdare */
644 ring
->next_to_clean
= 0;
645 ring
->next_to_use
= 0;
646 ring
->next_to_alloc
= 0;
648 /* Configure the Rx buffer size for one buff without split */
649 srrctl
|= FM10K_RX_BUFSZ
>> FM10K_SRRCTL_BSIZEPKT_SHIFT
;
651 /* Configure the Rx ring to suppress loopback packets */
652 srrctl
|= FM10K_SRRCTL_LOOPBACK_SUPPRESS
;
653 fm10k_write_reg(hw
, FM10K_SRRCTL(reg_idx
), srrctl
);
655 /* Enable drop on empty */
657 if (interface
->pfc_en
)
658 rx_pause
= interface
->pfc_en
;
660 if (!(rx_pause
& (1 << ring
->qos_pc
)))
661 rxdctl
|= FM10K_RXDCTL_DROP_ON_EMPTY
;
663 fm10k_write_reg(hw
, FM10K_RXDCTL(reg_idx
), rxdctl
);
665 /* assign default VLAN to queue */
666 ring
->vid
= hw
->mac
.default_vid
;
669 if (ring
->q_vector
) {
670 rxint
= ring
->q_vector
->v_idx
+ NON_Q_VECTORS(hw
);
671 rxint
|= FM10K_INT_MAP_TIMER1
;
674 fm10k_write_reg(hw
, FM10K_RXINT(reg_idx
), rxint
);
677 fm10k_write_reg(hw
, FM10K_RXQCTL(reg_idx
), rxqctl
);
679 /* place buffers on ring for receive data */
680 fm10k_alloc_rx_buffers(ring
, fm10k_desc_unused(ring
));
684 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
685 * @interface: board private structure
687 * Configure the drop enable bits for the Rx rings.
689 void fm10k_update_rx_drop_en(struct fm10k_intfc
*interface
)
691 struct fm10k_hw
*hw
= &interface
->hw
;
692 u8 rx_pause
= interface
->rx_pause
;
696 if (interface
->pfc_en
)
697 rx_pause
= interface
->pfc_en
;
700 for (i
= 0; i
< interface
->num_rx_queues
; i
++) {
701 struct fm10k_ring
*ring
= interface
->rx_ring
[i
];
702 u32 rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
703 u8 reg_idx
= ring
->reg_idx
;
705 if (!(rx_pause
& (1 << ring
->qos_pc
)))
706 rxdctl
|= FM10K_RXDCTL_DROP_ON_EMPTY
;
708 fm10k_write_reg(hw
, FM10K_RXDCTL(reg_idx
), rxdctl
);
713 * fm10k_configure_dglort - Configure Receive DGLORT after reset
714 * @interface: board private structure
716 * Configure the DGLORT description and RSS tables.
718 static void fm10k_configure_dglort(struct fm10k_intfc
*interface
)
720 struct fm10k_dglort_cfg dglort
= { 0 };
721 struct fm10k_hw
*hw
= &interface
->hw
;
725 /* Fill out hash function seeds */
726 for (i
= 0; i
< FM10K_RSSRK_SIZE
; i
++)
727 fm10k_write_reg(hw
, FM10K_RSSRK(0, i
), interface
->rssrk
[i
]);
729 /* Write RETA table to hardware */
730 for (i
= 0; i
< FM10K_RETA_SIZE
; i
++)
731 fm10k_write_reg(hw
, FM10K_RETA(0, i
), interface
->reta
[i
]);
733 /* Generate RSS hash based on packet types, TCP/UDP
734 * port numbers and/or IPv4/v6 src and dst addresses
736 mrqc
= FM10K_MRQC_IPV4
|
737 FM10K_MRQC_TCP_IPV4
|
741 if (interface
->flags
& FM10K_FLAG_RSS_FIELD_IPV4_UDP
)
742 mrqc
|= FM10K_MRQC_UDP_IPV4
;
743 if (interface
->flags
& FM10K_FLAG_RSS_FIELD_IPV6_UDP
)
744 mrqc
|= FM10K_MRQC_UDP_IPV6
;
746 fm10k_write_reg(hw
, FM10K_MRQC(0), mrqc
);
748 /* configure default DGLORT mapping for RSS/DCB */
749 dglort
.inner_rss
= 1;
750 dglort
.rss_l
= fls(interface
->ring_feature
[RING_F_RSS
].mask
);
751 dglort
.pc_l
= fls(interface
->ring_feature
[RING_F_QOS
].mask
);
752 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
754 /* assign GLORT per queue for queue mapped testing */
755 if (interface
->glort_count
> 64) {
756 memset(&dglort
, 0, sizeof(dglort
));
757 dglort
.inner_rss
= 1;
758 dglort
.glort
= interface
->glort
+ 64;
759 dglort
.idx
= fm10k_dglort_pf_queue
;
760 dglort
.queue_l
= fls(interface
->num_rx_queues
- 1);
761 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
764 /* assign glort value for RSS/DCB specific to this interface */
765 memset(&dglort
, 0, sizeof(dglort
));
766 dglort
.inner_rss
= 1;
767 dglort
.glort
= interface
->glort
;
768 dglort
.rss_l
= fls(interface
->ring_feature
[RING_F_RSS
].mask
);
769 dglort
.pc_l
= fls(interface
->ring_feature
[RING_F_QOS
].mask
);
770 /* configure DGLORT mapping for RSS/DCB */
771 dglort
.idx
= fm10k_dglort_pf_rss
;
772 if (interface
->l2_accel
)
773 dglort
.shared_l
= fls(interface
->l2_accel
->size
);
774 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
778 * fm10k_configure_rx - Configure Receive Unit after Reset
779 * @interface: board private structure
781 * Configure the Rx unit of the MAC after a reset.
783 static void fm10k_configure_rx(struct fm10k_intfc
*interface
)
787 /* Configure SWPRI to PC map */
788 fm10k_configure_swpri_map(interface
);
790 /* Configure RSS and DGLORT map */
791 fm10k_configure_dglort(interface
);
793 /* Setup the HW Rx Head and Tail descriptor pointers */
794 for (i
= 0; i
< interface
->num_rx_queues
; i
++)
795 fm10k_configure_rx_ring(interface
, interface
->rx_ring
[i
]);
797 /* possible poll here to verify that Rx rings are now enabled */
800 static void fm10k_napi_enable_all(struct fm10k_intfc
*interface
)
802 struct fm10k_q_vector
*q_vector
;
805 for (q_idx
= 0; q_idx
< interface
->num_q_vectors
; q_idx
++) {
806 q_vector
= interface
->q_vector
[q_idx
];
807 napi_enable(&q_vector
->napi
);
811 static irqreturn_t
fm10k_msix_clean_rings(int irq
, void *data
)
813 struct fm10k_q_vector
*q_vector
= data
;
815 if (q_vector
->rx
.count
|| q_vector
->tx
.count
)
816 napi_schedule(&q_vector
->napi
);
821 static irqreturn_t
fm10k_msix_mbx_vf(int irq
, void *data
)
823 struct fm10k_intfc
*interface
= data
;
824 struct fm10k_hw
*hw
= &interface
->hw
;
825 struct fm10k_mbx_info
*mbx
= &hw
->mbx
;
827 /* re-enable mailbox interrupt and indicate 20us delay */
828 fm10k_write_reg(hw
, FM10K_VFITR(FM10K_MBX_VECTOR
),
829 FM10K_ITR_ENABLE
| FM10K_MBX_INT_DELAY
);
831 /* service upstream mailbox */
832 if (fm10k_mbx_trylock(interface
)) {
833 mbx
->ops
.process(hw
, mbx
);
834 fm10k_mbx_unlock(interface
);
837 hw
->mac
.get_host_state
= 1;
838 fm10k_service_event_schedule(interface
);
843 #define FM10K_ERR_MSG(type) case (type): error = #type; break
844 static void fm10k_print_fault(struct fm10k_intfc
*interface
, int type
,
845 struct fm10k_fault
*fault
)
847 struct pci_dev
*pdev
= interface
->pdev
;
851 case FM10K_PCA_FAULT
:
852 switch (fault
->type
) {
854 error
= "Unknown PCA error";
856 FM10K_ERR_MSG(PCA_NO_FAULT
);
857 FM10K_ERR_MSG(PCA_UNMAPPED_ADDR
);
858 FM10K_ERR_MSG(PCA_BAD_QACCESS_PF
);
859 FM10K_ERR_MSG(PCA_BAD_QACCESS_VF
);
860 FM10K_ERR_MSG(PCA_MALICIOUS_REQ
);
861 FM10K_ERR_MSG(PCA_POISONED_TLP
);
862 FM10K_ERR_MSG(PCA_TLP_ABORT
);
865 case FM10K_THI_FAULT
:
866 switch (fault
->type
) {
868 error
= "Unknown THI error";
870 FM10K_ERR_MSG(THI_NO_FAULT
);
871 FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT
);
874 case FM10K_FUM_FAULT
:
875 switch (fault
->type
) {
877 error
= "Unknown FUM error";
879 FM10K_ERR_MSG(FUM_NO_FAULT
);
880 FM10K_ERR_MSG(FUM_UNMAPPED_ADDR
);
881 FM10K_ERR_MSG(FUM_BAD_VF_QACCESS
);
882 FM10K_ERR_MSG(FUM_ADD_DECODE_ERR
);
883 FM10K_ERR_MSG(FUM_RO_ERROR
);
884 FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR
);
885 FM10K_ERR_MSG(FUM_CSR_TIMEOUT
);
886 FM10K_ERR_MSG(FUM_INVALID_TYPE
);
887 FM10K_ERR_MSG(FUM_INVALID_LENGTH
);
888 FM10K_ERR_MSG(FUM_INVALID_BE
);
889 FM10K_ERR_MSG(FUM_INVALID_ALIGN
);
893 error
= "Undocumented fault";
898 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
899 error
, fault
->address
, fault
->specinfo
,
900 PCI_SLOT(fault
->func
), PCI_FUNC(fault
->func
));
903 static void fm10k_report_fault(struct fm10k_intfc
*interface
, u32 eicr
)
905 struct fm10k_hw
*hw
= &interface
->hw
;
906 struct fm10k_fault fault
= { 0 };
909 for (eicr
&= FM10K_EICR_FAULT_MASK
, type
= FM10K_PCA_FAULT
;
911 eicr
>>= 1, type
+= FM10K_FAULT_SIZE
) {
912 /* only check if there is an error reported */
916 /* retrieve fault info */
917 err
= hw
->mac
.ops
.get_fault(hw
, type
, &fault
);
919 dev_err(&interface
->pdev
->dev
,
920 "error reading fault\n");
924 fm10k_print_fault(interface
, type
, &fault
);
928 static void fm10k_reset_drop_on_empty(struct fm10k_intfc
*interface
, u32 eicr
)
930 struct fm10k_hw
*hw
= &interface
->hw
;
931 const u32 rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
935 if (!(eicr
& FM10K_EICR_MAXHOLDTIME
))
938 maxholdq
= fm10k_read_reg(hw
, FM10K_MAXHOLDQ(7));
940 fm10k_write_reg(hw
, FM10K_MAXHOLDQ(7), maxholdq
);
942 if (maxholdq
& (1 << 31)) {
943 if (q
< FM10K_MAX_QUEUES_PF
) {
944 interface
->rx_overrun_pf
++;
945 fm10k_write_reg(hw
, FM10K_RXDCTL(q
), rxdctl
);
947 interface
->rx_overrun_vf
++;
961 maxholdq
= fm10k_read_reg(hw
, FM10K_MAXHOLDQ(q
/ 32));
963 fm10k_write_reg(hw
, FM10K_MAXHOLDQ(q
/ 32), maxholdq
);
967 static irqreturn_t
fm10k_msix_mbx_pf(int irq
, void *data
)
969 struct fm10k_intfc
*interface
= data
;
970 struct fm10k_hw
*hw
= &interface
->hw
;
971 struct fm10k_mbx_info
*mbx
= &hw
->mbx
;
974 /* unmask any set bits related to this interrupt */
975 eicr
= fm10k_read_reg(hw
, FM10K_EICR
);
976 fm10k_write_reg(hw
, FM10K_EICR
, eicr
& (FM10K_EICR_MAILBOX
|
977 FM10K_EICR_SWITCHREADY
|
978 FM10K_EICR_SWITCHNOTREADY
));
980 /* report any faults found to the message log */
981 fm10k_report_fault(interface
, eicr
);
983 /* reset any queues disabled due to receiver overrun */
984 fm10k_reset_drop_on_empty(interface
, eicr
);
986 /* service mailboxes */
987 if (fm10k_mbx_trylock(interface
)) {
988 mbx
->ops
.process(hw
, mbx
);
989 fm10k_iov_event(interface
);
990 fm10k_mbx_unlock(interface
);
993 /* if switch toggled state we should reset GLORTs */
994 if (eicr
& FM10K_EICR_SWITCHNOTREADY
) {
995 /* force link down for at least 4 seconds */
996 interface
->link_down_event
= jiffies
+ (4 * HZ
);
997 set_bit(__FM10K_LINK_DOWN
, &interface
->state
);
999 /* reset dglort_map back to no config */
1000 hw
->mac
.dglort_map
= FM10K_DGLORTMAP_NONE
;
1003 /* we should validate host state after interrupt event */
1004 hw
->mac
.get_host_state
= 1;
1005 fm10k_service_event_schedule(interface
);
1007 /* re-enable mailbox interrupt and indicate 20us delay */
1008 fm10k_write_reg(hw
, FM10K_ITR(FM10K_MBX_VECTOR
),
1009 FM10K_ITR_ENABLE
| FM10K_MBX_INT_DELAY
);
1014 void fm10k_mbx_free_irq(struct fm10k_intfc
*interface
)
1016 struct msix_entry
*entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1017 struct fm10k_hw
*hw
= &interface
->hw
;
1020 /* disconnect the mailbox */
1021 hw
->mbx
.ops
.disconnect(hw
, &hw
->mbx
);
1023 /* disable Mailbox cause */
1024 if (hw
->mac
.type
== fm10k_mac_pf
) {
1025 fm10k_write_reg(hw
, FM10K_EIMR
,
1026 FM10K_EIMR_DISABLE(PCA_FAULT
) |
1027 FM10K_EIMR_DISABLE(FUM_FAULT
) |
1028 FM10K_EIMR_DISABLE(MAILBOX
) |
1029 FM10K_EIMR_DISABLE(SWITCHREADY
) |
1030 FM10K_EIMR_DISABLE(SWITCHNOTREADY
) |
1031 FM10K_EIMR_DISABLE(SRAMERROR
) |
1032 FM10K_EIMR_DISABLE(VFLR
) |
1033 FM10K_EIMR_DISABLE(MAXHOLDTIME
));
1034 itr_reg
= FM10K_ITR(FM10K_MBX_VECTOR
);
1036 itr_reg
= FM10K_VFITR(FM10K_MBX_VECTOR
);
1039 fm10k_write_reg(hw
, itr_reg
, FM10K_ITR_MASK_SET
);
1041 free_irq(entry
->vector
, interface
);
1044 static s32
fm10k_mbx_mac_addr(struct fm10k_hw
*hw
, u32
**results
,
1045 struct fm10k_mbx_info
*mbx
)
1047 bool vlan_override
= hw
->mac
.vlan_override
;
1048 u16 default_vid
= hw
->mac
.default_vid
;
1049 struct fm10k_intfc
*interface
;
1052 err
= fm10k_msg_mac_vlan_vf(hw
, results
, mbx
);
1056 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1058 /* MAC was changed so we need reset */
1059 if (is_valid_ether_addr(hw
->mac
.perm_addr
) &&
1060 memcmp(hw
->mac
.perm_addr
, hw
->mac
.addr
, ETH_ALEN
))
1061 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
1063 /* VLAN override was changed, or default VLAN changed */
1064 if ((vlan_override
!= hw
->mac
.vlan_override
) ||
1065 (default_vid
!= hw
->mac
.default_vid
))
1066 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
1071 static s32
fm10k_1588_msg_vf(struct fm10k_hw
*hw
, u32
**results
,
1072 struct fm10k_mbx_info
*mbx
)
1074 struct fm10k_intfc
*interface
;
1078 err
= fm10k_tlv_attr_get_u64(results
[FM10K_1588_MSG_TIMESTAMP
],
1083 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1085 fm10k_ts_tx_hwtstamp(interface
, 0, timestamp
);
1090 /* generic error handler for mailbox issues */
1091 static s32
fm10k_mbx_error(struct fm10k_hw
*hw
, u32
**results
,
1092 struct fm10k_mbx_info
*mbx
)
1094 struct fm10k_intfc
*interface
;
1095 struct pci_dev
*pdev
;
1097 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1098 pdev
= interface
->pdev
;
1100 dev_err(&pdev
->dev
, "Unknown message ID %u\n",
1101 **results
& FM10K_TLV_ID_MASK
);
1106 static const struct fm10k_msg_data vf_mbx_data
[] = {
1107 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test
),
1108 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr
),
1109 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf
),
1110 FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf
),
1111 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error
),
1114 static int fm10k_mbx_request_irq_vf(struct fm10k_intfc
*interface
)
1116 struct msix_entry
*entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1117 struct net_device
*dev
= interface
->netdev
;
1118 struct fm10k_hw
*hw
= &interface
->hw
;
1121 /* Use timer0 for interrupt moderation on the mailbox */
1122 u32 itr
= FM10K_INT_MAP_TIMER0
| entry
->entry
;
1124 /* register mailbox handlers */
1125 err
= hw
->mbx
.ops
.register_handlers(&hw
->mbx
, vf_mbx_data
);
1129 /* request the IRQ */
1130 err
= request_irq(entry
->vector
, fm10k_msix_mbx_vf
, 0,
1131 dev
->name
, interface
);
1133 netif_err(interface
, probe
, dev
,
1134 "request_irq for msix_mbx failed: %d\n", err
);
1138 /* map all of the interrupt sources */
1139 fm10k_write_reg(hw
, FM10K_VFINT_MAP
, itr
);
1141 /* enable interrupt */
1142 fm10k_write_reg(hw
, FM10K_VFITR(entry
->entry
), FM10K_ITR_ENABLE
);
1147 static s32
fm10k_lport_map(struct fm10k_hw
*hw
, u32
**results
,
1148 struct fm10k_mbx_info
*mbx
)
1150 struct fm10k_intfc
*interface
;
1151 u32 dglort_map
= hw
->mac
.dglort_map
;
1154 err
= fm10k_msg_lport_map_pf(hw
, results
, mbx
);
1158 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1160 /* we need to reset if port count was just updated */
1161 if (dglort_map
!= hw
->mac
.dglort_map
)
1162 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
1167 static s32
fm10k_update_pvid(struct fm10k_hw
*hw
, u32
**results
,
1168 struct fm10k_mbx_info
*mbx
)
1170 struct fm10k_intfc
*interface
;
1175 err
= fm10k_tlv_attr_get_u32(results
[FM10K_PF_ATTR_ID_UPDATE_PVID
],
1180 /* extract values from the pvid update */
1181 glort
= FM10K_MSG_HDR_FIELD_GET(pvid_update
, UPDATE_PVID_GLORT
);
1182 pvid
= FM10K_MSG_HDR_FIELD_GET(pvid_update
, UPDATE_PVID_PVID
);
1184 /* if glort is not valid return error */
1185 if (!fm10k_glort_valid_pf(hw
, glort
))
1186 return FM10K_ERR_PARAM
;
1188 /* verify VID is valid */
1189 if (pvid
>= FM10K_VLAN_TABLE_VID_MAX
)
1190 return FM10K_ERR_PARAM
;
1192 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1194 /* check to see if this belongs to one of the VFs */
1195 err
= fm10k_iov_update_pvid(interface
, glort
, pvid
);
1199 /* we need to reset if default VLAN was just updated */
1200 if (pvid
!= hw
->mac
.default_vid
)
1201 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
1203 hw
->mac
.default_vid
= pvid
;
1208 static s32
fm10k_1588_msg_pf(struct fm10k_hw
*hw
, u32
**results
,
1209 struct fm10k_mbx_info
*mbx
)
1211 struct fm10k_swapi_1588_timestamp timestamp
;
1212 struct fm10k_iov_data
*iov_data
;
1213 struct fm10k_intfc
*interface
;
1217 err
= fm10k_tlv_attr_get_le_struct(
1218 results
[FM10K_PF_ATTR_ID_1588_TIMESTAMP
],
1219 ×tamp
, sizeof(timestamp
));
1223 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1225 if (timestamp
.dglort
) {
1226 fm10k_ts_tx_hwtstamp(interface
, timestamp
.dglort
,
1227 le64_to_cpu(timestamp
.egress
));
1231 /* either dglort or sglort must be set */
1232 if (!timestamp
.sglort
)
1233 return FM10K_ERR_PARAM
;
1235 /* verify GLORT is at least one of the ones we own */
1236 sglort
= le16_to_cpu(timestamp
.sglort
);
1237 if (!fm10k_glort_valid_pf(hw
, sglort
))
1238 return FM10K_ERR_PARAM
;
1240 if (sglort
== interface
->glort
) {
1241 fm10k_ts_tx_hwtstamp(interface
, 0,
1242 le64_to_cpu(timestamp
.ingress
));
1246 /* if there is no iov_data then there is no mailboxes to process */
1247 if (!ACCESS_ONCE(interface
->iov_data
))
1248 return FM10K_ERR_PARAM
;
1252 /* notify VF if this timestamp belongs to it */
1253 iov_data
= interface
->iov_data
;
1254 vf_idx
= (hw
->mac
.dglort_map
& FM10K_DGLORTMAP_NONE
) - sglort
;
1256 if (!iov_data
|| vf_idx
>= iov_data
->num_vfs
) {
1257 err
= FM10K_ERR_PARAM
;
1261 err
= hw
->iov
.ops
.report_timestamp(hw
, &iov_data
->vf_info
[vf_idx
],
1262 le64_to_cpu(timestamp
.ingress
));
1270 static const struct fm10k_msg_data pf_mbx_data
[] = {
1271 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES
, fm10k_msg_err_pf
),
1272 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE
, fm10k_msg_err_pf
),
1273 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map
),
1274 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE
, fm10k_msg_err_pf
),
1275 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE
, fm10k_msg_err_pf
),
1276 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid
),
1277 FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf
),
1278 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error
),
1281 static int fm10k_mbx_request_irq_pf(struct fm10k_intfc
*interface
)
1283 struct msix_entry
*entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1284 struct net_device
*dev
= interface
->netdev
;
1285 struct fm10k_hw
*hw
= &interface
->hw
;
1288 /* Use timer0 for interrupt moderation on the mailbox */
1289 u32 mbx_itr
= FM10K_INT_MAP_TIMER0
| entry
->entry
;
1290 u32 other_itr
= FM10K_INT_MAP_IMMEDIATE
| entry
->entry
;
1292 /* register mailbox handlers */
1293 err
= hw
->mbx
.ops
.register_handlers(&hw
->mbx
, pf_mbx_data
);
1297 /* request the IRQ */
1298 err
= request_irq(entry
->vector
, fm10k_msix_mbx_pf
, 0,
1299 dev
->name
, interface
);
1301 netif_err(interface
, probe
, dev
,
1302 "request_irq for msix_mbx failed: %d\n", err
);
1306 /* Enable interrupts w/ no moderation for "other" interrupts */
1307 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_PCIeFault
), other_itr
);
1308 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_SwitchUpDown
), other_itr
);
1309 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_SRAM
), other_itr
);
1310 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_MaxHoldTime
), other_itr
);
1311 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_VFLR
), other_itr
);
1313 /* Enable interrupts w/ moderation for mailbox */
1314 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_Mailbox
), mbx_itr
);
1316 /* Enable individual interrupt causes */
1317 fm10k_write_reg(hw
, FM10K_EIMR
, FM10K_EIMR_ENABLE(PCA_FAULT
) |
1318 FM10K_EIMR_ENABLE(FUM_FAULT
) |
1319 FM10K_EIMR_ENABLE(MAILBOX
) |
1320 FM10K_EIMR_ENABLE(SWITCHREADY
) |
1321 FM10K_EIMR_ENABLE(SWITCHNOTREADY
) |
1322 FM10K_EIMR_ENABLE(SRAMERROR
) |
1323 FM10K_EIMR_ENABLE(VFLR
) |
1324 FM10K_EIMR_ENABLE(MAXHOLDTIME
));
1326 /* enable interrupt */
1327 fm10k_write_reg(hw
, FM10K_ITR(entry
->entry
), FM10K_ITR_ENABLE
);
1332 int fm10k_mbx_request_irq(struct fm10k_intfc
*interface
)
1334 struct fm10k_hw
*hw
= &interface
->hw
;
1337 /* enable Mailbox cause */
1338 if (hw
->mac
.type
== fm10k_mac_pf
)
1339 err
= fm10k_mbx_request_irq_pf(interface
);
1341 err
= fm10k_mbx_request_irq_vf(interface
);
1343 /* connect mailbox */
1345 err
= hw
->mbx
.ops
.connect(hw
, &hw
->mbx
);
1351 * fm10k_qv_free_irq - release interrupts associated with queue vectors
1352 * @interface: board private structure
1354 * Release all interrupts associated with this interface
1356 void fm10k_qv_free_irq(struct fm10k_intfc
*interface
)
1358 int vector
= interface
->num_q_vectors
;
1359 struct fm10k_hw
*hw
= &interface
->hw
;
1360 struct msix_entry
*entry
;
1362 entry
= &interface
->msix_entries
[NON_Q_VECTORS(hw
) + vector
];
1365 struct fm10k_q_vector
*q_vector
;
1369 q_vector
= interface
->q_vector
[vector
];
1371 if (!q_vector
->tx
.count
&& !q_vector
->rx
.count
)
1374 /* disable interrupts */
1376 writel(FM10K_ITR_MASK_SET
, q_vector
->itr
);
1378 free_irq(entry
->vector
, q_vector
);
1383 * fm10k_qv_request_irq - initialize interrupts for queue vectors
1384 * @interface: board private structure
1386 * Attempts to configure interrupts using the best available
1387 * capabilities of the hardware and kernel.
1389 int fm10k_qv_request_irq(struct fm10k_intfc
*interface
)
1391 struct net_device
*dev
= interface
->netdev
;
1392 struct fm10k_hw
*hw
= &interface
->hw
;
1393 struct msix_entry
*entry
;
1397 entry
= &interface
->msix_entries
[NON_Q_VECTORS(hw
)];
1399 for (vector
= 0; vector
< interface
->num_q_vectors
; vector
++) {
1400 struct fm10k_q_vector
*q_vector
= interface
->q_vector
[vector
];
1402 /* name the vector */
1403 if (q_vector
->tx
.count
&& q_vector
->rx
.count
) {
1404 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
1405 "%s-TxRx-%d", dev
->name
, ri
++);
1407 } else if (q_vector
->rx
.count
) {
1408 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
1409 "%s-rx-%d", dev
->name
, ri
++);
1410 } else if (q_vector
->tx
.count
) {
1411 snprintf(q_vector
->name
, sizeof(q_vector
->name
) - 1,
1412 "%s-tx-%d", dev
->name
, ti
++);
1414 /* skip this unused q_vector */
1418 /* Assign ITR register to q_vector */
1419 q_vector
->itr
= (hw
->mac
.type
== fm10k_mac_pf
) ?
1420 &interface
->uc_addr
[FM10K_ITR(entry
->entry
)] :
1421 &interface
->uc_addr
[FM10K_VFITR(entry
->entry
)];
1423 /* request the IRQ */
1424 err
= request_irq(entry
->vector
, &fm10k_msix_clean_rings
, 0,
1425 q_vector
->name
, q_vector
);
1427 netif_err(interface
, probe
, dev
,
1428 "request_irq failed for MSIX interrupt Error: %d\n",
1433 /* Enable q_vector */
1434 writel(FM10K_ITR_ENABLE
, q_vector
->itr
);
1442 /* wind through the ring freeing all entries and vectors */
1444 struct fm10k_q_vector
*q_vector
;
1448 q_vector
= interface
->q_vector
[vector
];
1450 if (!q_vector
->tx
.count
&& !q_vector
->rx
.count
)
1453 /* disable interrupts */
1455 writel(FM10K_ITR_MASK_SET
, q_vector
->itr
);
1457 free_irq(entry
->vector
, q_vector
);
1463 void fm10k_up(struct fm10k_intfc
*interface
)
1465 struct fm10k_hw
*hw
= &interface
->hw
;
1467 /* Enable Tx/Rx DMA */
1468 hw
->mac
.ops
.start_hw(hw
);
1470 /* configure Tx descriptor rings */
1471 fm10k_configure_tx(interface
);
1473 /* configure Rx descriptor rings */
1474 fm10k_configure_rx(interface
);
1476 /* configure interrupts */
1477 hw
->mac
.ops
.update_int_moderator(hw
);
1479 /* clear down bit to indicate we are ready to go */
1480 clear_bit(__FM10K_DOWN
, &interface
->state
);
1482 /* enable polling cleanups */
1483 fm10k_napi_enable_all(interface
);
1485 /* re-establish Rx filters */
1486 fm10k_restore_rx_state(interface
);
1488 /* enable transmits */
1489 netif_tx_start_all_queues(interface
->netdev
);
1491 /* kick off the service timer */
1492 hw
->mac
.get_host_state
= 1;
1493 mod_timer(&interface
->service_timer
, jiffies
);
1496 static void fm10k_napi_disable_all(struct fm10k_intfc
*interface
)
1498 struct fm10k_q_vector
*q_vector
;
1501 for (q_idx
= 0; q_idx
< interface
->num_q_vectors
; q_idx
++) {
1502 q_vector
= interface
->q_vector
[q_idx
];
1503 napi_disable(&q_vector
->napi
);
1507 void fm10k_down(struct fm10k_intfc
*interface
)
1509 struct net_device
*netdev
= interface
->netdev
;
1510 struct fm10k_hw
*hw
= &interface
->hw
;
1512 /* signal that we are down to the interrupt handler and service task */
1513 set_bit(__FM10K_DOWN
, &interface
->state
);
1515 /* call carrier off first to avoid false dev_watchdog timeouts */
1516 netif_carrier_off(netdev
);
1518 /* disable transmits */
1519 netif_tx_stop_all_queues(netdev
);
1520 netif_tx_disable(netdev
);
1522 /* reset Rx filters */
1523 fm10k_reset_rx_state(interface
);
1525 /* allow 10ms for device to quiesce */
1526 usleep_range(10000, 20000);
1528 /* disable polling routines */
1529 fm10k_napi_disable_all(interface
);
1531 del_timer_sync(&interface
->service_timer
);
1533 /* capture stats one last time before stopping interface */
1534 fm10k_update_stats(interface
);
1536 /* Disable DMA engine for Tx/Rx */
1537 hw
->mac
.ops
.stop_hw(hw
);
1539 /* free any buffers still on the rings */
1540 fm10k_clean_all_tx_rings(interface
);
1544 * fm10k_sw_init - Initialize general software structures
1545 * @interface: host interface private structure to initialize
1547 * fm10k_sw_init initializes the interface private data structure.
1548 * Fields are initialized based on PCI device information and
1549 * OS network device settings (MTU size).
1551 static int fm10k_sw_init(struct fm10k_intfc
*interface
,
1552 const struct pci_device_id
*ent
)
1554 const struct fm10k_info
*fi
= fm10k_info_tbl
[ent
->driver_data
];
1555 struct fm10k_hw
*hw
= &interface
->hw
;
1556 struct pci_dev
*pdev
= interface
->pdev
;
1557 struct net_device
*netdev
= interface
->netdev
;
1558 u32 rss_key
[FM10K_RSSRK_SIZE
];
1562 /* initialize back pointer */
1563 hw
->back
= interface
;
1564 hw
->hw_addr
= interface
->uc_addr
;
1566 /* PCI config space info */
1567 hw
->vendor_id
= pdev
->vendor
;
1568 hw
->device_id
= pdev
->device
;
1569 hw
->revision_id
= pdev
->revision
;
1570 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
1571 hw
->subsystem_device_id
= pdev
->subsystem_device
;
1574 memcpy(&hw
->mac
.ops
, fi
->mac_ops
, sizeof(hw
->mac
.ops
));
1575 hw
->mac
.type
= fi
->mac
;
1577 /* Setup IOV handlers */
1579 memcpy(&hw
->iov
.ops
, fi
->iov_ops
, sizeof(hw
->iov
.ops
));
1581 /* Set common capability flags and settings */
1582 rss
= min_t(int, FM10K_MAX_RSS_INDICES
, num_online_cpus());
1583 interface
->ring_feature
[RING_F_RSS
].limit
= rss
;
1584 fi
->get_invariants(hw
);
1586 /* pick up the PCIe bus settings for reporting later */
1587 if (hw
->mac
.ops
.get_bus_info
)
1588 hw
->mac
.ops
.get_bus_info(hw
);
1590 /* limit the usable DMA range */
1591 if (hw
->mac
.ops
.set_dma_mask
)
1592 hw
->mac
.ops
.set_dma_mask(hw
, dma_get_mask(&pdev
->dev
));
1594 /* update netdev with DMA restrictions */
1595 if (dma_get_mask(&pdev
->dev
) > DMA_BIT_MASK(32)) {
1596 netdev
->features
|= NETIF_F_HIGHDMA
;
1597 netdev
->vlan_features
|= NETIF_F_HIGHDMA
;
1600 /* delay any future reset requests */
1601 interface
->last_reset
= jiffies
+ (10 * HZ
);
1603 /* reset and initialize the hardware so it is in a known state */
1604 err
= hw
->mac
.ops
.reset_hw(hw
) ? : hw
->mac
.ops
.init_hw(hw
);
1606 dev_err(&pdev
->dev
, "init_hw failed: %d\n", err
);
1610 /* initialize hardware statistics */
1611 hw
->mac
.ops
.update_hw_stats(hw
, &interface
->stats
);
1613 /* Set upper limit on IOV VFs that can be allocated */
1614 pci_sriov_set_totalvfs(pdev
, hw
->iov
.total_vfs
);
1616 /* Start with random Ethernet address */
1617 eth_random_addr(hw
->mac
.addr
);
1619 /* Initialize MAC address from hardware */
1620 err
= hw
->mac
.ops
.read_mac_addr(hw
);
1622 dev_warn(&pdev
->dev
,
1623 "Failed to obtain MAC address defaulting to random\n");
1624 /* tag address assignment as random */
1625 netdev
->addr_assign_type
|= NET_ADDR_RANDOM
;
1628 memcpy(netdev
->dev_addr
, hw
->mac
.addr
, netdev
->addr_len
);
1629 memcpy(netdev
->perm_addr
, hw
->mac
.addr
, netdev
->addr_len
);
1631 if (!is_valid_ether_addr(netdev
->perm_addr
)) {
1632 dev_err(&pdev
->dev
, "Invalid MAC Address\n");
1636 /* assign BAR 4 resources for use with PTP */
1637 if (fm10k_read_reg(hw
, FM10K_CTRL
) & FM10K_CTRL_BAR4_ALLOWED
)
1638 interface
->sw_addr
= ioremap(pci_resource_start(pdev
, 4),
1639 pci_resource_len(pdev
, 4));
1640 hw
->sw_addr
= interface
->sw_addr
;
1642 /* Only the PF can support VXLAN and NVGRE offloads */
1643 if (hw
->mac
.type
!= fm10k_mac_pf
) {
1644 netdev
->hw_enc_features
= 0;
1645 netdev
->features
&= ~NETIF_F_GSO_UDP_TUNNEL
;
1646 netdev
->hw_features
&= ~NETIF_F_GSO_UDP_TUNNEL
;
1649 /* initialize DCBNL interface */
1650 fm10k_dcbnl_set_ops(netdev
);
1652 /* Initialize service timer and service task */
1653 set_bit(__FM10K_SERVICE_DISABLE
, &interface
->state
);
1654 setup_timer(&interface
->service_timer
, &fm10k_service_timer
,
1655 (unsigned long)interface
);
1656 INIT_WORK(&interface
->service_task
, fm10k_service_task
);
1658 /* Intitialize timestamp data */
1659 fm10k_ts_init(interface
);
1661 /* set default ring sizes */
1662 interface
->tx_ring_count
= FM10K_DEFAULT_TXD
;
1663 interface
->rx_ring_count
= FM10K_DEFAULT_RXD
;
1665 /* set default interrupt moderation */
1666 interface
->tx_itr
= FM10K_ITR_10K
;
1667 interface
->rx_itr
= FM10K_ITR_ADAPTIVE
| FM10K_ITR_20K
;
1669 /* initialize vxlan_port list */
1670 INIT_LIST_HEAD(&interface
->vxlan_port
);
1672 netdev_rss_key_fill(rss_key
, sizeof(rss_key
));
1673 memcpy(interface
->rssrk
, rss_key
, sizeof(rss_key
));
1675 /* Start off interface as being down */
1676 set_bit(__FM10K_DOWN
, &interface
->state
);
1681 static void fm10k_slot_warn(struct fm10k_intfc
*interface
)
1683 struct device
*dev
= &interface
->pdev
->dev
;
1684 struct fm10k_hw
*hw
= &interface
->hw
;
1686 if (hw
->mac
.ops
.is_slot_appropriate(hw
))
1690 "For optimal performance, a %s %s slot is recommended.\n",
1691 (hw
->bus_caps
.width
== fm10k_bus_width_pcie_x1
? "x1" :
1692 hw
->bus_caps
.width
== fm10k_bus_width_pcie_x4
? "x4" :
1694 (hw
->bus_caps
.speed
== fm10k_bus_speed_2500
? "2.5GT/s" :
1695 hw
->bus_caps
.speed
== fm10k_bus_speed_5000
? "5.0GT/s" :
1698 "A slot with more lanes and/or higher speed is suggested.\n");
1702 * fm10k_probe - Device Initialization Routine
1703 * @pdev: PCI device information struct
1704 * @ent: entry in fm10k_pci_tbl
1706 * Returns 0 on success, negative on failure
1708 * fm10k_probe initializes an interface identified by a pci_dev structure.
1709 * The OS initialization, configuring of the interface private structure,
1710 * and a hardware reset occur.
1712 static int fm10k_probe(struct pci_dev
*pdev
,
1713 const struct pci_device_id
*ent
)
1715 struct net_device
*netdev
;
1716 struct fm10k_intfc
*interface
;
1717 struct fm10k_hw
*hw
;
1721 err
= pci_enable_device_mem(pdev
);
1725 /* By default fm10k only supports a 48 bit DMA mask */
1726 dma_mask
= DMA_BIT_MASK(48) | dma_get_required_mask(&pdev
->dev
);
1728 if ((dma_mask
<= DMA_BIT_MASK(32)) ||
1729 dma_set_mask_and_coherent(&pdev
->dev
, dma_mask
)) {
1730 dma_mask
&= DMA_BIT_MASK(32);
1732 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
1733 err
= dma_set_mask(&pdev
->dev
, DMA_BIT_MASK(32));
1735 err
= dma_set_coherent_mask(&pdev
->dev
,
1739 "No usable DMA configuration, aborting\n");
1745 err
= pci_request_selected_regions(pdev
,
1746 pci_select_bars(pdev
,
1751 "pci_request_selected_regions failed 0x%x\n", err
);
1755 pci_enable_pcie_error_reporting(pdev
);
1757 pci_set_master(pdev
);
1758 pci_save_state(pdev
);
1760 netdev
= fm10k_alloc_netdev();
1763 goto err_alloc_netdev
;
1766 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
1768 interface
= netdev_priv(netdev
);
1769 pci_set_drvdata(pdev
, interface
);
1771 interface
->netdev
= netdev
;
1772 interface
->pdev
= pdev
;
1773 hw
= &interface
->hw
;
1775 interface
->uc_addr
= ioremap(pci_resource_start(pdev
, 0),
1776 FM10K_UC_ADDR_SIZE
);
1777 if (!interface
->uc_addr
) {
1782 err
= fm10k_sw_init(interface
, ent
);
1786 /* enable debugfs support */
1787 fm10k_dbg_intfc_init(interface
);
1789 err
= fm10k_init_queueing_scheme(interface
);
1793 err
= fm10k_mbx_request_irq(interface
);
1795 goto err_mbx_interrupt
;
1797 /* final check of hardware state before registering the interface */
1798 err
= fm10k_hw_ready(interface
);
1802 err
= register_netdev(netdev
);
1806 /* carrier off reporting is important to ethtool even BEFORE open */
1807 netif_carrier_off(netdev
);
1809 /* stop all the transmit queues from transmitting until link is up */
1810 netif_tx_stop_all_queues(netdev
);
1812 /* Register PTP interface */
1813 fm10k_ptp_register(interface
);
1815 /* print bus type/speed/width info */
1816 dev_info(&pdev
->dev
, "(PCI Express:%s Width: %s Payload: %s)\n",
1817 (hw
->bus
.speed
== fm10k_bus_speed_8000
? "8.0GT/s" :
1818 hw
->bus
.speed
== fm10k_bus_speed_5000
? "5.0GT/s" :
1819 hw
->bus
.speed
== fm10k_bus_speed_2500
? "2.5GT/s" :
1821 (hw
->bus
.width
== fm10k_bus_width_pcie_x8
? "x8" :
1822 hw
->bus
.width
== fm10k_bus_width_pcie_x4
? "x4" :
1823 hw
->bus
.width
== fm10k_bus_width_pcie_x1
? "x1" :
1825 (hw
->bus
.payload
== fm10k_bus_payload_128
? "128B" :
1826 hw
->bus
.payload
== fm10k_bus_payload_256
? "256B" :
1827 hw
->bus
.payload
== fm10k_bus_payload_512
? "512B" :
1830 /* print warning for non-optimal configurations */
1831 fm10k_slot_warn(interface
);
1833 /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
1834 fm10k_iov_configure(pdev
, 0);
1836 /* clear the service task disable bit to allow service task to start */
1837 clear_bit(__FM10K_SERVICE_DISABLE
, &interface
->state
);
1842 fm10k_mbx_free_irq(interface
);
1844 fm10k_clear_queueing_scheme(interface
);
1846 if (interface
->sw_addr
)
1847 iounmap(interface
->sw_addr
);
1848 iounmap(interface
->uc_addr
);
1850 free_netdev(netdev
);
1852 pci_release_selected_regions(pdev
,
1853 pci_select_bars(pdev
, IORESOURCE_MEM
));
1856 pci_disable_device(pdev
);
1861 * fm10k_remove - Device Removal Routine
1862 * @pdev: PCI device information struct
1864 * fm10k_remove is called by the PCI subsystem to alert the driver
1865 * that it should release a PCI device. The could be caused by a
1866 * Hot-Plug event, or because the driver is going to be removed from
1869 static void fm10k_remove(struct pci_dev
*pdev
)
1871 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
1872 struct net_device
*netdev
= interface
->netdev
;
1874 set_bit(__FM10K_SERVICE_DISABLE
, &interface
->state
);
1875 cancel_work_sync(&interface
->service_task
);
1877 /* free netdev, this may bounce the interrupts due to setup_tc */
1878 if (netdev
->reg_state
== NETREG_REGISTERED
)
1879 unregister_netdev(netdev
);
1881 /* cleanup timestamp handling */
1882 fm10k_ptp_unregister(interface
);
1885 fm10k_iov_disable(pdev
);
1887 /* disable mailbox interrupt */
1888 fm10k_mbx_free_irq(interface
);
1890 /* free interrupts */
1891 fm10k_clear_queueing_scheme(interface
);
1893 /* remove any debugfs interfaces */
1894 fm10k_dbg_intfc_exit(interface
);
1896 if (interface
->sw_addr
)
1897 iounmap(interface
->sw_addr
);
1898 iounmap(interface
->uc_addr
);
1900 free_netdev(netdev
);
1902 pci_release_selected_regions(pdev
,
1903 pci_select_bars(pdev
, IORESOURCE_MEM
));
1905 pci_disable_pcie_error_reporting(pdev
);
1907 pci_disable_device(pdev
);
1912 * fm10k_resume - Restore device to pre-sleep state
1913 * @pdev: PCI device information struct
1915 * fm10k_resume is called after the system has powered back up from a sleep
1916 * state and is ready to resume operation. This function is meant to restore
1917 * the device back to its pre-sleep state.
1919 static int fm10k_resume(struct pci_dev
*pdev
)
1921 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
1922 struct net_device
*netdev
= interface
->netdev
;
1923 struct fm10k_hw
*hw
= &interface
->hw
;
1926 pci_set_power_state(pdev
, PCI_D0
);
1927 pci_restore_state(pdev
);
1929 /* pci_restore_state clears dev->state_saved so call
1930 * pci_save_state to restore it.
1932 pci_save_state(pdev
);
1934 err
= pci_enable_device_mem(pdev
);
1936 dev_err(&pdev
->dev
, "Cannot enable PCI device from suspend\n");
1939 pci_set_master(pdev
);
1941 pci_wake_from_d3(pdev
, false);
1943 /* refresh hw_addr in case it was dropped */
1944 hw
->hw_addr
= interface
->uc_addr
;
1946 /* reset hardware to known state */
1947 err
= hw
->mac
.ops
.init_hw(&interface
->hw
);
1951 /* reset statistics starting values */
1952 hw
->mac
.ops
.rebind_hw_stats(hw
, &interface
->stats
);
1955 fm10k_ts_reset(interface
);
1959 err
= fm10k_init_queueing_scheme(interface
);
1961 fm10k_mbx_request_irq(interface
);
1962 if (netif_running(netdev
))
1963 err
= fm10k_open(netdev
);
1971 /* restore SR-IOV interface */
1972 fm10k_iov_resume(pdev
);
1974 netif_device_attach(netdev
);
1980 * fm10k_suspend - Prepare the device for a system sleep state
1981 * @pdev: PCI device information struct
1983 * fm10k_suspend is meant to shutdown the device prior to the system entering
1984 * a sleep state. The fm10k hardware does not support wake on lan so the
1985 * driver simply needs to shut down the device so it is in a low power state.
1987 static int fm10k_suspend(struct pci_dev
*pdev
, pm_message_t state
)
1989 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
1990 struct net_device
*netdev
= interface
->netdev
;
1993 netif_device_detach(netdev
);
1995 fm10k_iov_suspend(pdev
);
1999 if (netif_running(netdev
))
2000 fm10k_close(netdev
);
2002 fm10k_mbx_free_irq(interface
);
2004 fm10k_clear_queueing_scheme(interface
);
2008 err
= pci_save_state(pdev
);
2012 pci_disable_device(pdev
);
2013 pci_wake_from_d3(pdev
, false);
2014 pci_set_power_state(pdev
, PCI_D3hot
);
2019 #endif /* CONFIG_PM */
2021 * fm10k_io_error_detected - called when PCI error is detected
2022 * @pdev: Pointer to PCI device
2023 * @state: The current pci connection state
2025 * This function is called after a PCI bus error affecting
2026 * this device has been detected.
2028 static pci_ers_result_t
fm10k_io_error_detected(struct pci_dev
*pdev
,
2029 pci_channel_state_t state
)
2031 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
2032 struct net_device
*netdev
= interface
->netdev
;
2034 netif_device_detach(netdev
);
2036 if (state
== pci_channel_io_perm_failure
)
2037 return PCI_ERS_RESULT_DISCONNECT
;
2039 if (netif_running(netdev
))
2040 fm10k_close(netdev
);
2042 fm10k_mbx_free_irq(interface
);
2044 pci_disable_device(pdev
);
2046 /* Request a slot reset. */
2047 return PCI_ERS_RESULT_NEED_RESET
;
2051 * fm10k_io_slot_reset - called after the pci bus has been reset.
2052 * @pdev: Pointer to PCI device
2054 * Restart the card from scratch, as if from a cold-boot.
2056 static pci_ers_result_t
fm10k_io_slot_reset(struct pci_dev
*pdev
)
2058 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
2059 pci_ers_result_t result
;
2061 if (pci_enable_device_mem(pdev
)) {
2063 "Cannot re-enable PCI device after reset.\n");
2064 result
= PCI_ERS_RESULT_DISCONNECT
;
2066 pci_set_master(pdev
);
2067 pci_restore_state(pdev
);
2069 /* After second error pci->state_saved is false, this
2070 * resets it so EEH doesn't break.
2072 pci_save_state(pdev
);
2074 pci_wake_from_d3(pdev
, false);
2076 /* refresh hw_addr in case it was dropped */
2077 interface
->hw
.hw_addr
= interface
->uc_addr
;
2079 interface
->flags
|= FM10K_FLAG_RESET_REQUESTED
;
2080 fm10k_service_event_schedule(interface
);
2082 result
= PCI_ERS_RESULT_RECOVERED
;
2085 pci_cleanup_aer_uncorrect_error_status(pdev
);
2091 * fm10k_io_resume - called when traffic can start flowing again.
2092 * @pdev: Pointer to PCI device
2094 * This callback is called when the error recovery driver tells us that
2095 * its OK to resume normal operation.
2097 static void fm10k_io_resume(struct pci_dev
*pdev
)
2099 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
2100 struct net_device
*netdev
= interface
->netdev
;
2101 struct fm10k_hw
*hw
= &interface
->hw
;
2104 /* reset hardware to known state */
2105 hw
->mac
.ops
.init_hw(&interface
->hw
);
2107 /* reset statistics starting values */
2108 hw
->mac
.ops
.rebind_hw_stats(hw
, &interface
->stats
);
2110 /* reassociate interrupts */
2111 fm10k_mbx_request_irq(interface
);
2114 fm10k_ts_reset(interface
);
2116 if (netif_running(netdev
))
2117 err
= fm10k_open(netdev
);
2119 /* final check of hardware state before registering the interface */
2120 err
= err
? : fm10k_hw_ready(interface
);
2123 netif_device_attach(netdev
);
2126 static const struct pci_error_handlers fm10k_err_handler
= {
2127 .error_detected
= fm10k_io_error_detected
,
2128 .slot_reset
= fm10k_io_slot_reset
,
2129 .resume
= fm10k_io_resume
,
2132 static struct pci_driver fm10k_driver
= {
2133 .name
= fm10k_driver_name
,
2134 .id_table
= fm10k_pci_tbl
,
2135 .probe
= fm10k_probe
,
2136 .remove
= fm10k_remove
,
2138 .suspend
= fm10k_suspend
,
2139 .resume
= fm10k_resume
,
2141 .sriov_configure
= fm10k_iov_configure
,
2142 .err_handler
= &fm10k_err_handler
2146 * fm10k_register_pci_driver - register driver interface
2148 * This funciton is called on module load in order to register the driver.
2150 int fm10k_register_pci_driver(void)
2152 return pci_register_driver(&fm10k_driver
);
2156 * fm10k_unregister_pci_driver - unregister driver interface
2158 * This funciton is called on module unload in order to remove the driver.
2160 void fm10k_unregister_pci_driver(void)
2162 pci_unregister_driver(&fm10k_driver
);