fm10k: enable bus master after every reset
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
/* Intel(R) Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/module.h>
#include <linux/aer.h>

#include "fm10k.h"

static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};

/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);

u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
{
	struct fm10k_intfc *interface = hw->back;
	u16 value = 0;

	if (FM10K_REMOVED(hw->hw_addr))
		return ~value;

	pci_read_config_word(interface->pdev, reg, &value);
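	/* an all-ones config read suggests the device dropped off the bus;
	 * the register flush forces a MMIO read so fm10k_read_reg() can
	 * confirm the removal and detach the netdev
	 */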
	if (value == 0xFFFF)
		fm10k_write_flush(hw);

	return value;
}

u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
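	/* a read of all ones may mean the device was removed; double-check
	 * against register 0, unless offset 0 is what was just read
	 */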
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

static int fm10k_hw_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_write_flush(hw);

	return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}

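/* The SERVICE_SCHED bit acts as a single-slot lock: the task is queued only
 * when servicing is not disabled and no run is already pending, and the bit
 * is released again in fm10k_service_event_complete().
 */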
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
		queue_work(fm10k_workqueue, &interface->service_task);
}

static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}

/**
 * fm10k_service_timer - Timer Call-back
 * @data: pointer to interface cast into an unsigned long
 **/
static void fm10k_service_timer(unsigned long data)
{
	struct fm10k_intfc *interface = (struct fm10k_intfc *)data;

	/* Reset the timer */
	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);

	fm10k_service_event_schedule(interface);
}

static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* do nothing if device is still present or hw_addr is set */
	if (netif_device_present(netdev) || interface->hw.hw_addr)
		return;

	rtnl_lock();

	if (netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();
}

static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(netdev);

	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	rtnl_unlock();
}

static int fm10k_handle_reset(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	rtnl_lock();

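	/* the reset may have cleared the bus master enable bit, so restore
	 * it before touching the hardware again
	 */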
	pci_set_master(interface->pdev);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
		goto reinit_err;
	}

	err = fm10k_init_queueing_scheme(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"init_queueing_scheme failed: %d\n", err);
		goto reinit_err;
	}

	/* re-associate interrupts */
	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = fm10k_hw_ready(interface);
	if (err)
		goto err_open;

	/* update hardware address for VFs if perm_addr has changed */
	if (hw->mac.type == fm10k_mac_vf) {
		if (is_valid_ether_addr(hw->mac.perm_addr)) {
			ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
			ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
			netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
		}

		if (hw->mac.vlan_override)
			netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
		else
			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
	if (err)
		goto err_open;

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);

	return err;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
reinit_err:
	netif_device_detach(netdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);

	return err;
}

static void fm10k_reinit(struct fm10k_intfc *interface)
{
	int err;

	fm10k_prepare_for_reset(interface);

	err = fm10k_handle_reset(interface);
	if (err)
		dev_err(&interface->pdev->dev,
			"fm10k_handle_reset failed: %d\n", err);
}

static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");

	fm10k_reinit(interface);
}

/**
 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
 * @interface: board private structure
 *
 * Configure the SWPRI to PC mapping for the port.
 **/
static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	/* clear flag indicating update is needed */
	interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;

	/* these registers are only available on the PF */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* configure SWPRI to PC map */
	for (i = 0; i < FM10K_SWPRI_MAX; i++)
		fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
				netdev_get_prio_tc_map(netdev, i));
}

/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}

/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes */
	fm10k_iov_mbx(interface);
}

/**
 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently down */
	if (netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is up\n");

	netif_carrier_on(netdev);
	netif_tx_wake_all_queues(netdev);
}

/**
 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
 * @interface: board private structure
 **/
static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;

	/* only continue if link state is currently up */
	if (!netif_carrier_ok(netdev))
		return;

	netif_info(interface, drv, netdev, "NIC Link is down\n");

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
}

/**
 * fm10k_update_stats - Update the board statistics counters.
 * @interface: board private structure
 **/
void fm10k_update_stats(struct fm10k_intfc *interface)
{
	struct net_device_stats *net_stats = &interface->netdev->stats;
	struct fm10k_hw *hw = &interface->hw;
	u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
	u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
	u64 rx_link_errors = 0;
	u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
	u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
	u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
	u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
	u64 bytes, pkts;
	int i;

	/* ensure only one thread updates stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
		return;

	/* do not allow stats update via service task for next second */
	interface->next_stats_update = jiffies + HZ;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);

		if (!tx_ring)
			continue;

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		tx_csum_errors += tx_ring->tx_stats.csum_err;
		bytes += tx_ring->stats.bytes;
		pkts += tx_ring->stats.packets;
		hw_csum_tx_good += tx_ring->tx_stats.csum_good;
	}

	interface->restart_queue = restart_queue;
	interface->tx_busy = tx_busy;
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = pkts;
	interface->tx_csum_errors = tx_csum_errors;
	interface->hw_csum_tx_good = hw_csum_tx_good;

	/* gather some stats to the interface struct that are per queue */
	for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);

		if (!rx_ring)
			continue;

		bytes += rx_ring->stats.bytes;
		pkts += rx_ring->stats.packets;
		alloc_failed += rx_ring->rx_stats.alloc_failed;
		rx_csum_errors += rx_ring->rx_stats.csum_err;
		rx_errors += rx_ring->rx_stats.errors;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good;
		rx_switch_errors += rx_ring->rx_stats.switch_errors;
		rx_drops += rx_ring->rx_stats.drops;
		rx_pp_errors += rx_ring->rx_stats.pp_errors;
		rx_link_errors += rx_ring->rx_stats.link_errors;
		rx_length_errors += rx_ring->rx_stats.length_errors;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = pkts;
	interface->alloc_failed = alloc_failed;
	interface->rx_csum_errors = rx_csum_errors;
	interface->hw_csum_rx_good = hw_csum_rx_good;
	interface->rx_switch_errors = rx_switch_errors;
	interface->rx_drops = rx_drops;
	interface->rx_pp_errors = rx_pp_errors;
	interface->rx_link_errors = rx_link_errors;
	interface->rx_length_errors = rx_length_errors;

	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	for (i = 0; i < hw->mac.max_queues; i++) {
		struct fm10k_hw_stats_q *q = &interface->stats.q[i];

		tx_bytes_nic += q->tx_bytes.count;
		tx_pkts_nic += q->tx_packets.count;
		rx_bytes_nic += q->rx_bytes.count;
		rx_pkts_nic += q->rx_packets.count;
		rx_drops_nic += q->rx_drops.count;
	}

	interface->tx_bytes_nic = tx_bytes_nic;
	interface->tx_packets_nic = tx_pkts_nic;
	interface->rx_bytes_nic = rx_bytes_nic;
	interface->rx_packets_nic = rx_pkts_nic;
	interface->rx_drops_nic = rx_drops_nic;

	/* Fill out the OS statistics structure */
	net_stats->rx_errors = rx_errors;
	net_stats->rx_dropped = interface->stats.nodesc_drop.count;

	clear_bit(__FM10K_UPDATING_STATS, &interface->state);
}

/**
 * fm10k_watchdog_flush_tx - flush queues on host not ready
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
	int some_tx_pending = 0;
	int i;

	/* nothing to do if carrier is up */
	if (netif_carrier_ok(interface->netdev))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *tx_ring = interface->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean) {
			some_tx_pending = 1;
			break;
		}
	}

	/* We've lost link, so the controller stops DMA, but we've got
	 * queued Tx work that's never going to get done, so reset
	 * controller to flush Tx.
	 */
	if (some_tx_pending)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
}

/**
 * fm10k_watchdog_subtask - check and bring link up
 * @interface: pointer to the device interface structure
 **/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	if (interface->host_ready)
		fm10k_watchdog_host_is_ready(interface);
	else
		fm10k_watchdog_host_not_ready(interface);

	/* update stats only once every second */
	if (time_is_before_jiffies(interface->next_stats_update))
		fm10k_update_stats(interface);

	/* flush any uncompleted work */
	fm10k_watchdog_flush_tx(interface);
}

/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface: pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}

/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks run even when interface is down */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}

/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* Initialize XPS */
	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
	    ring->q_vector)
		netif_set_xps_queue(ring->netdev,
				    &ring->q_vector->affinity_mask,
				    ring->queue_index);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}

/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * fm10k_configure_tx - Configure Transmit Unit after Reset
 * @interface: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void fm10k_configure_tx(struct fm10k_intfc *interface)
{
	int i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_configure_tx_ring(interface, interface->tx_ring[i]);

	/* poll here to verify that Tx rings are now enabled */
	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
}

/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
	u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & BIT(ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* if we have an active VLAN, disable default VLAN ID */
	if (test_bit(hw->mac.default_vid, interface->active_vlans))
		ring->vid |= FM10K_VLAN_CLEAR;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}

/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;

#endif
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & BIT(ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}

/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables.
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

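	/* rss_l and pc_l are bit widths: fls() of each index mask gives the
	 * number of DGLORT bits reserved for the RSS and priority-class
	 * lookups
	 */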
	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}

/**
 * fm10k_configure_rx - Configure Receive Unit after Reset
 * @interface: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void fm10k_configure_rx(struct fm10k_intfc *interface)
{
	int i;

	/* Configure SWPRI to PC map */
	fm10k_configure_swpri_map(interface);

	/* Configure RSS and DGLORT map */
	fm10k_configure_dglort(interface);

	/* Setup the HW Rx Head and Tail descriptor pointers */
	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_configure_rx_ring(interface, interface->rx_ring[i]);

	/* possible poll here to verify that Rx rings are now enabled */
}

static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
{
	struct fm10k_q_vector *q_vector;
	int q_idx;

	for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
		q_vector = interface->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

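/* MSI-X handler for queue interrupts: just schedule NAPI and return; the
 * actual ring cleanup happens in the polling context
 */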
static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
{
	struct fm10k_q_vector *q_vector = data;

	if (q_vector->rx.count || q_vector->tx.count)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
			FM10K_ITR_ENABLE);

	/* service upstream mailbox */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = true;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state))
		return;

	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
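/* expands to a case label that stringifies the matching fault type for the
 * log message below
 */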
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
			       struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data = interface->iov_data;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));

	/* For VF faults, clear out the respective LPORT, reset the queue
	 * resources, and then reconnect to the mailbox. This allows the
	 * VF in question to resume behavior. For transient faults that are
	 * the result of non-malicious behavior this will log the fault and
	 * allow the VF to resume functionality. Obviously for malicious VFs
	 * they will be able to attempt malicious behavior again. In this
	 * case, the system administrator will need to step in and manually
	 * remove or disable the VF in question.
	 */
	if (fault->func && iov_data) {
		int vf = fault->func - 1;
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];

		hw->iov.ops.reset_lport(hw, vf_info);
		hw->iov.ops.reset_resources(hw, vf_info);

		/* reset_lport disables the VF, so re-enable it */
		hw->iov.ops.set_lport(hw, vf_info, vf,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* reset_resources will disconnect from the mbx */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}
}

static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_handle_fault(interface, type, &fault);
	}
}

static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
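	/* Each MAXHOLDQ register covers 32 queues, and the set bits are
	 * written straight back to clear them. Walk from queue 255 down to
	 * 0, testing the top bit and shifting maxholdq left each step; a
	 * fresh register is read (and cleared) every time q crosses a
	 * multiple-of-32 boundary.
	 */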
	for (q = 255;;) {
		if (maxholdq & BIT(31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		maxholdq *= 2;
		if (!maxholdq)
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}

static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* read and acknowledge any set causes handled by this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));
1138
1139 /* report any faults found to the message log */
1140 fm10k_report_fault(interface, eicr);
1141
1142 /* reset any queues disabled due to receiver overrun */
1143 fm10k_reset_drop_on_empty(interface, eicr);
1144
1145 /* service mailboxes */
1146 if (fm10k_mbx_trylock(interface)) {
1147 mbx->ops.process(hw, mbx);
1148 /* handle VFLRE events */
1149 fm10k_iov_event(interface);
1150 fm10k_mbx_unlock(interface);
1151 }
1152
1153 /* if switch toggled state we should reset GLORTs */
1154 if (eicr & FM10K_EICR_SWITCHNOTREADY) {
1155 /* force link down for at least 4 seconds */
1156 interface->link_down_event = jiffies + (4 * HZ);
1157 set_bit(__FM10K_LINK_DOWN, &interface->state);
1158
1159 /* reset dglort_map back to no config */
1160 hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
1161 }
1162
1163 /* we should validate host state after interrupt event */
1164 hw->mac.get_host_state = true;
1165
1166 /* validate host state, and handle VF mailboxes in the service task */
1167 fm10k_service_event_schedule(interface);
1168
1169 /* re-enable mailbox interrupt and indicate 20us delay */
1170 fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
1171 (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
1172 FM10K_ITR_ENABLE);
1173
1174 return IRQ_HANDLED;
1175 }
1176
1177 void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
1178 {
1179 struct fm10k_hw *hw = &interface->hw;
1180 struct msix_entry *entry;
1181 int itr_reg;
1182
1183 /* no mailbox IRQ to free if MSI-X is not enabled */
1184 if (!interface->msix_entries)
1185 return;
1186
1187 entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1188
1189 /* disconnect the mailbox */
1190 hw->mbx.ops.disconnect(hw, &hw->mbx);
1191
1192 /* disable Mailbox cause */
1193 if (hw->mac.type == fm10k_mac_pf) {
1194 fm10k_write_reg(hw, FM10K_EIMR,
1195 FM10K_EIMR_DISABLE(PCA_FAULT) |
1196 FM10K_EIMR_DISABLE(FUM_FAULT) |
1197 FM10K_EIMR_DISABLE(MAILBOX) |
1198 FM10K_EIMR_DISABLE(SWITCHREADY) |
1199 FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
1200 FM10K_EIMR_DISABLE(SRAMERROR) |
1201 FM10K_EIMR_DISABLE(VFLR) |
1202 FM10K_EIMR_DISABLE(MAXHOLDTIME));
1203 itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
1204 } else {
1205 itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
1206 }
1207
1208 fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);
1209
1210 free_irq(entry->vector, interface);
1211 }
1212
1213 static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
1214 struct fm10k_mbx_info *mbx)
1215 {
1216 bool vlan_override = hw->mac.vlan_override;
1217 u16 default_vid = hw->mac.default_vid;
1218 struct fm10k_intfc *interface;
1219 s32 err;
1220
1221 err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
1222 if (err)
1223 return err;
1224
1225 interface = container_of(hw, struct fm10k_intfc, hw);
1226
1227 /* MAC was changed so we need reset */
1228 if (is_valid_ether_addr(hw->mac.perm_addr) &&
1229 !ether_addr_equal(hw->mac.perm_addr, hw->mac.addr))
1230 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1231
1232 /* VLAN override was changed, or default VLAN changed */
1233 if ((vlan_override != hw->mac.vlan_override) ||
1234 (default_vid != hw->mac.default_vid))
1235 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1236
1237 return 0;
1238 }
1239
1240 /* generic error handler for mailbox issues */
1241 static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
1242 struct fm10k_mbx_info __always_unused *mbx)
1243 {
1244 struct fm10k_intfc *interface;
1245 struct pci_dev *pdev;
1246
1247 interface = container_of(hw, struct fm10k_intfc, hw);
1248 pdev = interface->pdev;
1249
1250 dev_err(&pdev->dev, "Unknown message ID %u\n",
1251 **results & FM10K_TLV_ID_MASK);
1252
1253 return 0;
1254 }
1255
1256 static const struct fm10k_msg_data vf_mbx_data[] = {
1257 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1258 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
1259 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
1260 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
1261 };
1262
1263 static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
1264 {
1265 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1266 struct net_device *dev = interface->netdev;
1267 struct fm10k_hw *hw = &interface->hw;
1268 int err;
1269
1270 /* Use timer0 for interrupt moderation on the mailbox */
1271 u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
1272
1273 /* register mailbox handlers */
1274 err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
1275 if (err)
1276 return err;
1277
1278 /* request the IRQ */
1279 err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
1280 dev->name, interface);
1281 if (err) {
1282 netif_err(interface, probe, dev,
1283 "request_irq for msix_mbx failed: %d\n", err);
1284 return err;
1285 }
1286
1287 /* map all of the interrupt sources */
1288 fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);
1289
1290 /* enable interrupt */
1291 fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);
1292
1293 return 0;
1294 }
1295
1296 static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
1297 struct fm10k_mbx_info *mbx)
1298 {
1299 struct fm10k_intfc *interface;
1300 u32 dglort_map = hw->mac.dglort_map;
1301 s32 err;
1302
1303 interface = container_of(hw, struct fm10k_intfc, hw);
1304
1305 err = fm10k_msg_err_pf(hw, results, mbx);
1306 if (!err && hw->swapi.status) {
1307 /* force link down for a reasonable delay */
1308 interface->link_down_event = jiffies + (2 * HZ);
1309 set_bit(__FM10K_LINK_DOWN, &interface->state);
1310
1311 /* reset dglort_map back to no config */
1312 hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
1313
1314 fm10k_service_event_schedule(interface);
1315
1316 /* prevent overloading kernel message buffer */
1317 if (interface->lport_map_failed)
1318 return 0;
1319
1320 interface->lport_map_failed = true;
1321
1322 if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
1323 dev_warn(&interface->pdev->dev,
1324 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
1325 dev_warn(&interface->pdev->dev,
1326 "request logical port map failed: %d\n",
1327 hw->swapi.status);
1328
1329 return 0;
1330 }
1331
1332 err = fm10k_msg_lport_map_pf(hw, results, mbx);
1333 if (err)
1334 return err;
1335
1336 interface->lport_map_failed = false;
1337
1338 /* we need to reset if port count was just updated */
1339 if (dglort_map != hw->mac.dglort_map)
1340 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1341
1342 return 0;
1343 }
1344
1345 static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
1346 struct fm10k_mbx_info __always_unused *mbx)
1347 {
1348 struct fm10k_intfc *interface;
1349 u16 glort, pvid;
1350 u32 pvid_update;
1351 s32 err;
1352
1353 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1354 &pvid_update);
1355 if (err)
1356 return err;
1357
1358 /* extract values from the pvid update */
1359 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1360 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1361
1362 /* if glort is not valid return error */
1363 if (!fm10k_glort_valid_pf(hw, glort))
1364 return FM10K_ERR_PARAM;
1365
1366 /* verify VLAN ID is valid */
1367 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1368 return FM10K_ERR_PARAM;
1369
1370 interface = container_of(hw, struct fm10k_intfc, hw);
1371
1372 /* check to see if this belongs to one of the VFs */
1373 err = fm10k_iov_update_pvid(interface, glort, pvid);
1374 if (!err)
1375 return 0;
1376
1377 /* we need to reset if default VLAN was just updated */
1378 if (pvid != hw->mac.default_vid)
1379 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1380
1381 hw->mac.default_vid = pvid;
1382
1383 return 0;
1384 }
1385
1386 static const struct fm10k_msg_data pf_mbx_data[] = {
1387 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1388 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1389 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
1390 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1391 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1392 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
1393 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
1394 };
1395
1396 static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
1397 {
1398 struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
1399 struct net_device *dev = interface->netdev;
1400 struct fm10k_hw *hw = &interface->hw;
1401 int err;
1402
1403 /* Use timer0 for interrupt moderation on the mailbox */
1404 u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
1405 u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
1406
1407 /* register mailbox handlers */
1408 err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
1409 if (err)
1410 return err;
1411
1412 /* request the IRQ */
1413 err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
1414 dev->name, interface);
1415 if (err) {
1416 netif_err(interface, probe, dev,
1417 "request_irq for msix_mbx failed: %d\n", err);
1418 return err;
1419 }
1420
1421 /* Enable interrupts w/ no moderation for "other" interrupts */
1422 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
1423 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
1424 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
1425 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
1426 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);
1427
1428 /* Enable interrupts w/ moderation for mailbox */
1429 fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);
1430
1431 /* Enable individual interrupt causes */
1432 fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
1433 FM10K_EIMR_ENABLE(FUM_FAULT) |
1434 FM10K_EIMR_ENABLE(MAILBOX) |
1435 FM10K_EIMR_ENABLE(SWITCHREADY) |
1436 FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
1437 FM10K_EIMR_ENABLE(SRAMERROR) |
1438 FM10K_EIMR_ENABLE(VFLR) |
1439 FM10K_EIMR_ENABLE(MAXHOLDTIME));
1440
1441 /* enable interrupt */
1442 fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);
1443
1444 return 0;
1445 }
1446
1447 int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
1448 {
1449 struct fm10k_hw *hw = &interface->hw;
1450 int err;
1451
1452 /* enable Mailbox cause */
1453 if (hw->mac.type == fm10k_mac_pf)
1454 err = fm10k_mbx_request_irq_pf(interface);
1455 else
1456 err = fm10k_mbx_request_irq_vf(interface);
1457 if (err)
1458 return err;
1459
1460 /* connect mailbox */
1461 err = hw->mbx.ops.connect(hw, &hw->mbx);
1462
1463 /* if the mailbox failed to connect, then free IRQ */
1464 if (err)
1465 fm10k_mbx_free_irq(interface);
1466
1467 return err;
1468 }
1469
1470 /**
1471 * fm10k_qv_free_irq - release interrupts associated with queue vectors
1472 * @interface: board private structure
1473 *
1474 * Release all interrupts associated with this interface
1475 **/
1476 void fm10k_qv_free_irq(struct fm10k_intfc *interface)
1477 {
1478 int vector = interface->num_q_vectors;
1479 struct fm10k_hw *hw = &interface->hw;
1480 struct msix_entry *entry;
1481
1482 entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
1483
1484 while (vector) {
1485 struct fm10k_q_vector *q_vector;
1486
1487 vector--;
1488 entry--;
1489 q_vector = interface->q_vector[vector];
1490
1491 if (!q_vector->tx.count && !q_vector->rx.count)
1492 continue;
1493
1494 /* clear the affinity_mask in the IRQ descriptor */
1495 irq_set_affinity_hint(entry->vector, NULL);
1496
1497 /* disable interrupts */
1498 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1499
1500 free_irq(entry->vector, q_vector);
1501 }
1502 }
1503
1504 /**
1505 * fm10k_qv_request_irq - initialize interrupts for queue vectors
1506 * @interface: board private structure
1507 *
1508 * Attempts to configure interrupts using the best available
1509 * capabilities of the hardware and kernel.
1510 **/
1511 int fm10k_qv_request_irq(struct fm10k_intfc *interface)
1512 {
1513 struct net_device *dev = interface->netdev;
1514 struct fm10k_hw *hw = &interface->hw;
1515 struct msix_entry *entry;
1516 int ri = 0, ti = 0;
1517 int vector, err;
1518
1519 entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
1520
1521 for (vector = 0; vector < interface->num_q_vectors; vector++) {
1522 struct fm10k_q_vector *q_vector = interface->q_vector[vector];
1523
1524 /* name the vector */
1525 if (q_vector->tx.count && q_vector->rx.count) {
1526 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1527 "%s-TxRx-%d", dev->name, ri++);
1528 ti++;
1529 } else if (q_vector->rx.count) {
1530 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1531 "%s-rx-%d", dev->name, ri++);
1532 } else if (q_vector->tx.count) {
1533 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1534 "%s-tx-%d", dev->name, ti++);
1535 } else {
1536 /* skip this unused q_vector */
1537 continue;
1538 }
1539
1540 /* Assign ITR register to q_vector */
1541 q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
1542 &interface->uc_addr[FM10K_ITR(entry->entry)] :
1543 &interface->uc_addr[FM10K_VFITR(entry->entry)];
1544
1545 /* request the IRQ */
1546 err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
1547 q_vector->name, q_vector);
1548 if (err) {
1549 netif_err(interface, probe, dev,
1550 "request_irq failed for MSIX interrupt Error: %d\n",
1551 err);
1552 goto err_out;
1553 }
1554
1555 /* assign the mask for this irq */
1556 irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
1557
1558 /* Enable q_vector */
1559 writel(FM10K_ITR_ENABLE, q_vector->itr);
1560
1561 entry++;
1562 }
1563
1564 return 0;
1565
1566 err_out:
1567 /* wind through the ring freeing all entries and vectors */
1568 while (vector) {
1569 struct fm10k_q_vector *q_vector;
1570
1571 entry--;
1572 vector--;
1573 q_vector = interface->q_vector[vector];
1574
1575 if (!q_vector->tx.count && !q_vector->rx.count)
1576 continue;
1577
1578 /* clear the affinity_mask in the IRQ descriptor */
1579 irq_set_affinity_hint(entry->vector, NULL);
1580
1581 /* disable interrupts */
1582 writel(FM10K_ITR_MASK_SET, q_vector->itr);
1583
1584 free_irq(entry->vector, q_vector);
1585 }
1586
1587 return err;
1588 }
1589
1590 void fm10k_up(struct fm10k_intfc *interface)
1591 {
1592 struct fm10k_hw *hw = &interface->hw;
1593
1594 /* Enable Tx/Rx DMA */
1595 hw->mac.ops.start_hw(hw);
1596
1597 /* configure Tx descriptor rings */
1598 fm10k_configure_tx(interface);
1599
1600 /* configure Rx descriptor rings */
1601 fm10k_configure_rx(interface);
1602
1603 /* configure interrupts */
1604 hw->mac.ops.update_int_moderator(hw);
1605
1606 /* enable statistics capture again */
1607 clear_bit(__FM10K_UPDATING_STATS, &interface->state);
1608
1609 /* clear down bit to indicate we are ready to go */
1610 clear_bit(__FM10K_DOWN, &interface->state);
1611
1612 /* enable polling cleanups */
1613 fm10k_napi_enable_all(interface);
1614
1615 /* re-establish Rx filters */
1616 fm10k_restore_rx_state(interface);
1617
1618 /* enable transmits */
1619 netif_tx_start_all_queues(interface->netdev);
1620
1621 /* kick off the service timer now */
1622 hw->mac.get_host_state = true;
1623 mod_timer(&interface->service_timer, jiffies);
1624 }
1625
1626 static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
1627 {
1628 struct fm10k_q_vector *q_vector;
1629 int q_idx;
1630
1631 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
1632 q_vector = interface->q_vector[q_idx];
1633 napi_disable(&q_vector->napi);
1634 }
1635 }
1636
1637 void fm10k_down(struct fm10k_intfc *interface)
1638 {
1639 struct net_device *netdev = interface->netdev;
1640 struct fm10k_hw *hw = &interface->hw;
1641 int err, i = 0, count = 0;
1642
1643 /* signal that we are down to the interrupt handler and service task */
1644 if (test_and_set_bit(__FM10K_DOWN, &interface->state))
1645 return;
1646
1647 /* call carrier off first to avoid false dev_watchdog timeouts */
1648 netif_carrier_off(netdev);
1649
1650 /* disable transmits */
1651 netif_tx_stop_all_queues(netdev);
1652 netif_tx_disable(netdev);
1653
1654 /* reset Rx filters */
1655 fm10k_reset_rx_state(interface);
1656
1657 /* disable polling routines */
1658 fm10k_napi_disable_all(interface);
1659
1660 /* capture stats one last time before stopping interface */
1661 fm10k_update_stats(interface);
1662
1663 /* prevent updating statistics while we're down */
1664 while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
1665 usleep_range(1000, 2000);
1666
1667 /* skip waiting for TX DMA if we lost PCIe link */
1668 if (FM10K_REMOVED(hw->hw_addr))
1669 goto skip_tx_dma_drain;
1670
1671 /* In some rare circumstances it can take a while for Tx queues to
1672 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
1673 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
1674 * until the Tx queues have emptied, or until a number of retries. If
1675 * we fail to clear within the retry loop, we will issue a warning
1676 * indicating that Tx DMA is probably hung. Note this means we call
1677 * .stop_hw() twice but this shouldn't cause any problems.
1678 */
1679 err = hw->mac.ops.stop_hw(hw);
1680 if (err != FM10K_ERR_REQUESTS_PENDING)
1681 goto skip_tx_dma_drain;
1682
1683 #define TX_DMA_DRAIN_RETRIES 25
1684 for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
1685 usleep_range(10000, 20000);
1686
1687 /* start checking at the last ring to have pending Tx */
1688 for (; i < interface->num_tx_queues; i++)
1689 if (fm10k_get_tx_pending(interface->tx_ring[i]))
1690 break;
1691
1692 /* if all the queues are drained, we can break now */
1693 if (i == interface->num_tx_queues)
1694 break;
1695 }
1696
1697 if (count >= TX_DMA_DRAIN_RETRIES)
1698 dev_err(&interface->pdev->dev,
1699 "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
1700 count);
1701 skip_tx_dma_drain:
1702 /* Disable DMA engine for Tx/Rx */
1703 err = hw->mac.ops.stop_hw(hw);
1704 if (err == FM10K_ERR_REQUESTS_PENDING)
1705 dev_err(&interface->pdev->dev,
1706 "due to pending requests hw was not shut down gracefully\n");
1707 else if (err)
1708 dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
1709
1710 /* free any buffers still on the rings */
1711 fm10k_clean_all_tx_rings(interface);
1712 fm10k_clean_all_rx_rings(interface);
1713 }
1714
1715 /**
1716 * fm10k_sw_init - Initialize general software structures
1717 * @interface: host interface private structure to initialize
1718 *
1719 * fm10k_sw_init initializes the interface private data structure.
1720 * Fields are initialized based on PCI device information and
1721 * OS network device settings (MTU size).
1722 **/
1723 static int fm10k_sw_init(struct fm10k_intfc *interface,
1724 const struct pci_device_id *ent)
1725 {
1726 const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
1727 struct fm10k_hw *hw = &interface->hw;
1728 struct pci_dev *pdev = interface->pdev;
1729 struct net_device *netdev = interface->netdev;
1730 u32 rss_key[FM10K_RSSRK_SIZE];
1731 unsigned int rss;
1732 int err;
1733
1734 /* initialize back pointer */
1735 hw->back = interface;
1736 hw->hw_addr = interface->uc_addr;
1737
1738 /* PCI config space info */
1739 hw->vendor_id = pdev->vendor;
1740 hw->device_id = pdev->device;
1741 hw->revision_id = pdev->revision;
1742 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1743 hw->subsystem_device_id = pdev->subsystem_device;
1744
1745 /* Setup hw api */
1746 memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
1747 hw->mac.type = fi->mac;
1748
1749 /* Setup IOV handlers */
1750 if (fi->iov_ops)
1751 memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));
1752
1753 /* Set common capability flags and settings */
1754 rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
1755 interface->ring_feature[RING_F_RSS].limit = rss;
1756 fi->get_invariants(hw);
1757
1758 /* pick up the PCIe bus settings for reporting later */
1759 if (hw->mac.ops.get_bus_info)
1760 hw->mac.ops.get_bus_info(hw);
1761
1762 /* limit the usable DMA range */
1763 if (hw->mac.ops.set_dma_mask)
1764 hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));
1765
1766 /* update netdev with DMA restrictions */
1767 if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
1768 netdev->features |= NETIF_F_HIGHDMA;
1769 netdev->vlan_features |= NETIF_F_HIGHDMA;
1770 }
1771
1772 /* delay any future reset requests */
1773 interface->last_reset = jiffies + (10 * HZ);
1774
1775 /* reset and initialize the hardware so it is in a known state */
1776 err = hw->mac.ops.reset_hw(hw);
1777 if (err) {
1778 dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
1779 return err;
1780 }
1781
1782 err = hw->mac.ops.init_hw(hw);
1783 if (err) {
1784 dev_err(&pdev->dev, "init_hw failed: %d\n", err);
1785 return err;
1786 }
1787
1788 /* initialize hardware statistics */
1789 hw->mac.ops.update_hw_stats(hw, &interface->stats);
1790
1791 /* Set upper limit on IOV VFs that can be allocated */
1792 pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);
1793
1794 /* Start with random Ethernet address */
1795 eth_random_addr(hw->mac.addr);
1796
1797 /* Initialize MAC address from hardware */
1798 err = hw->mac.ops.read_mac_addr(hw);
1799 if (err) {
1800 dev_warn(&pdev->dev,
1801 "Failed to obtain MAC address defaulting to random\n");
1802 /* tag address assignment as random */
1803 netdev->addr_assign_type |= NET_ADDR_RANDOM;
1804 }
1805
1806 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
1807 ether_addr_copy(netdev->perm_addr, hw->mac.addr);
1808
1809 if (!is_valid_ether_addr(netdev->perm_addr)) {
1810 dev_err(&pdev->dev, "Invalid MAC Address\n");
1811 return -EIO;
1812 }
1813
1814 /* initialize DCBNL interface */
1815 fm10k_dcbnl_set_ops(netdev);
1816
1817 /* set default ring sizes */
1818 interface->tx_ring_count = FM10K_DEFAULT_TXD;
1819 interface->rx_ring_count = FM10K_DEFAULT_RXD;
1820
1821 /* set default interrupt moderation */
1822 interface->tx_itr = FM10K_TX_ITR_DEFAULT;
1823 interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
1824
1825 /* initialize vxlan_port list */
1826 INIT_LIST_HEAD(&interface->vxlan_port);
1827
1828 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1829 memcpy(interface->rssrk, rss_key, sizeof(rss_key));
1830
1831 /* Start off interface as being down */
1832 set_bit(__FM10K_DOWN, &interface->state);
1833 set_bit(__FM10K_UPDATING_STATS, &interface->state);
1834
1835 return 0;
1836 }
1837
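/**
 * fm10k_slot_warn - warn when PCIe slot bandwidth limits the device
 * @interface: host interface private structure
 *
 * Worked example of the arithmetic below (illustrative values): a 5.0GT/s
 * x4 link loses 20% to 8b/10b encoding, so max_gts = 4 * 4 = 16. If the
 * device capabilities report 8.0GT/s x8 (expected_gts = 8 * 8 = 64), the
 * comparison fails and the driver suggests the faster, wider slot.
 **/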
1838 static void fm10k_slot_warn(struct fm10k_intfc *interface)
1839 {
1840 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
1841 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
1842 struct fm10k_hw *hw = &interface->hw;
1843 int max_gts = 0, expected_gts = 0;
1844
1845 if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
1846 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
1847 dev_warn(&interface->pdev->dev,
1848 "Unable to determine PCI Express bandwidth.\n");
1849 return;
1850 }
1851
1852 switch (speed) {
1853 case PCIE_SPEED_2_5GT:
1854 /* 8b/10b encoding reduces max throughput by 20% */
1855 max_gts = 2 * width;
1856 break;
1857 case PCIE_SPEED_5_0GT:
1858 /* 8b/10b encoding reduces max throughput by 20% */
1859 max_gts = 4 * width;
1860 break;
1861 case PCIE_SPEED_8_0GT:
1862 /* 128b/130b encoding has less than 2% impact on throughput */
1863 max_gts = 8 * width;
1864 break;
1865 default:
1866 dev_warn(&interface->pdev->dev,
1867 "Unable to determine PCI Express bandwidth.\n");
1868 return;
1869 }
1870
1871 dev_info(&interface->pdev->dev,
1872 "PCI Express bandwidth of %dGT/s available\n",
1873 max_gts);
1874 dev_info(&interface->pdev->dev,
1875 "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
1876 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
1877 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
1878 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
1879 "Unknown"),
1880 hw->bus.width,
1881 (speed == PCIE_SPEED_2_5GT ? "20%" :
1882 speed == PCIE_SPEED_5_0GT ? "20%" :
1883 speed == PCIE_SPEED_8_0GT ? "<2%" :
1884 "Unknown"),
1885 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
1886 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
1887 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
1888 "Unknown"));
1889
1890 switch (hw->bus_caps.speed) {
1891 case fm10k_bus_speed_2500:
1892 /* 8b/10b encoding reduces max throughput by 20% */
1893 expected_gts = 2 * hw->bus_caps.width;
1894 break;
1895 case fm10k_bus_speed_5000:
1896 /* 8b/10b encoding reduces max throughput by 20% */
1897 expected_gts = 4 * hw->bus_caps.width;
1898 break;
1899 case fm10k_bus_speed_8000:
1900 /* 128b/130b encoding has less than 2% impact on throughput */
1901 expected_gts = 8 * hw->bus_caps.width;
1902 break;
1903 default:
1904 dev_warn(&interface->pdev->dev,
1905 "Unable to determine expected PCI Express bandwidth.\n");
1906 return;
1907 }
1908
1909 if (max_gts >= expected_gts)
1910 return;
1911
1912 dev_warn(&interface->pdev->dev,
1913 "This device requires %dGT/s of bandwidth for optimal performance.\n",
1914 expected_gts);
1915 dev_warn(&interface->pdev->dev,
1916 "A %sslot with x%d lanes is suggested.\n",
1917 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
1918 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
1919 hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
1920 hw->bus_caps.width);
1921 }
1922
1923 /**
1924 * fm10k_probe - Device Initialization Routine
1925 * @pdev: PCI device information struct
1926 * @ent: entry in fm10k_pci_tbl
1927 *
1928 * Returns 0 on success, negative on failure
1929 *
1930 * fm10k_probe initializes an interface identified by a pci_dev structure.
1931  * It performs OS initialization, configures the interface private
1932  * structure, and resets the hardware.
1933 **/
1934 static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1935 {
1936 struct net_device *netdev;
1937 struct fm10k_intfc *interface;
1938 int err;
1939
1940 err = pci_enable_device_mem(pdev);
1941 if (err)
1942 return err;
1943
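	/* ask for 48-bit DMA addressing first; if that cannot be satisfied,
	 * fall back to a 32-bit mask
	 */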
1944 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
1945 if (err)
1946 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1947 if (err) {
1948 dev_err(&pdev->dev,
1949 "DMA configuration failed: %d\n", err);
1950 goto err_dma;
1951 }
1952
1953 err = pci_request_selected_regions(pdev,
1954 pci_select_bars(pdev,
1955 IORESOURCE_MEM),
1956 fm10k_driver_name);
1957 if (err) {
1958 dev_err(&pdev->dev,
1959 "pci_request_selected_regions failed: %d\n", err);
1960 goto err_pci_reg;
1961 }
1962
1963 pci_enable_pcie_error_reporting(pdev);
1964
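	/* enable bus mastering for DMA and snapshot config space so the
	 * resume and slot reset paths can restore it later
	 */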
1965 pci_set_master(pdev);
1966 pci_save_state(pdev);
1967
1968 netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]);
1969 if (!netdev) {
1970 err = -ENOMEM;
1971 goto err_alloc_netdev;
1972 }
1973
1974 SET_NETDEV_DEV(netdev, &pdev->dev);
1975
1976 interface = netdev_priv(netdev);
1977 pci_set_drvdata(pdev, interface);
1978
1979 interface->netdev = netdev;
1980 interface->pdev = pdev;
1981
1982 interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
1983 FM10K_UC_ADDR_SIZE);
1984 if (!interface->uc_addr) {
1985 err = -EIO;
1986 goto err_ioremap;
1987 }
1988
1989 err = fm10k_sw_init(interface, ent);
1990 if (err)
1991 goto err_sw_init;
1992
1993 /* enable debugfs support */
1994 fm10k_dbg_intfc_init(interface);
1995
1996 err = fm10k_init_queueing_scheme(interface);
1997 if (err)
1998 goto err_sw_init;
1999
2000 /* the mbx interrupt might attempt to schedule the service task, so we
2001 	 * must ensure it is disabled since we haven't yet set up the timer
2002 * or work item.
2003 */
2004 set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
2005
2006 err = fm10k_mbx_request_irq(interface);
2007 if (err)
2008 goto err_mbx_interrupt;
2009
2010 /* final check of hardware state before registering the interface */
2011 err = fm10k_hw_ready(interface);
2012 if (err)
2013 goto err_register;
2014
2015 err = register_netdev(netdev);
2016 if (err)
2017 goto err_register;
2018
2019 /* carrier off reporting is important to ethtool even BEFORE open */
2020 netif_carrier_off(netdev);
2021
2022 /* stop all the transmit queues from transmitting until link is up */
2023 netif_tx_stop_all_queues(netdev);
2024
2025 /* Initialize service timer and service task late in order to avoid
2026 * cleanup issues.
2027 */
2028 setup_timer(&interface->service_timer, &fm10k_service_timer,
2029 (unsigned long)interface);
2030 INIT_WORK(&interface->service_task, fm10k_service_task);
2031
2032 /* kick off service timer now, even when interface is down */
2033 mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
2034
2035 /* print warning for non-optimal configurations */
2036 fm10k_slot_warn(interface);
2037
2038 /* report MAC address for logging */
2039 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
2040
2041 /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
2042 fm10k_iov_configure(pdev, 0);
2043
2044 /* clear the service task disable bit to allow service task to start */
2045 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
2046
2047 return 0;
2048
2049 err_register:
2050 fm10k_mbx_free_irq(interface);
2051 err_mbx_interrupt:
2052 fm10k_clear_queueing_scheme(interface);
2053 err_sw_init:
2054 if (interface->sw_addr)
2055 iounmap(interface->sw_addr);
2056 iounmap(interface->uc_addr);
2057 err_ioremap:
2058 free_netdev(netdev);
2059 err_alloc_netdev:
2060 pci_release_selected_regions(pdev,
2061 pci_select_bars(pdev, IORESOURCE_MEM));
2062 err_pci_reg:
2063 err_dma:
2064 pci_disable_device(pdev);
2065 return err;
2066 }
2067
2068 /**
2069 * fm10k_remove - Device Removal Routine
2070 * @pdev: PCI device information struct
2071 *
2072 * fm10k_remove is called by the PCI subsystem to alert the driver
2073  * that it should release a PCI device. This could be caused by a
2074 * Hot-Plug event, or because the driver is going to be removed from
2075 * memory.
2076 **/
2077 static void fm10k_remove(struct pci_dev *pdev)
2078 {
2079 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2080 struct net_device *netdev = interface->netdev;
2081
2082 del_timer_sync(&interface->service_timer);
2083
2084 set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
2085 cancel_work_sync(&interface->service_task);
2086
2087 	/* unregister netdev; this may bounce the interrupts due to setup_tc */
2088 if (netdev->reg_state == NETREG_REGISTERED)
2089 unregister_netdev(netdev);
2090
2091 /* release VFs */
2092 fm10k_iov_disable(pdev);
2093
2094 /* disable mailbox interrupt */
2095 fm10k_mbx_free_irq(interface);
2096
2097 /* free interrupts */
2098 fm10k_clear_queueing_scheme(interface);
2099
2100 /* remove any debugfs interfaces */
2101 fm10k_dbg_intfc_exit(interface);
2102
2103 if (interface->sw_addr)
2104 iounmap(interface->sw_addr);
2105 iounmap(interface->uc_addr);
2106
2107 free_netdev(netdev);
2108
2109 pci_release_selected_regions(pdev,
2110 pci_select_bars(pdev, IORESOURCE_MEM));
2111
2112 pci_disable_pcie_error_reporting(pdev);
2113
2114 pci_disable_device(pdev);
2115 }
2116
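/**
 * fm10k_prepare_suspend - quiesce software activity before a reset
 * @interface: host interface private structure
 **/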
2117 static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
2118 {
2119 	/* the watchdog task reads from registers, which might look like
2120 * a surprise remove if the PCIe device is disabled while we're
2121 * stopped. We stop the watchdog task until after we resume software
2122 * activity.
2123 */
2124 set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
2125 cancel_work_sync(&interface->service_task);
2126
2127 fm10k_prepare_for_reset(interface);
2128 }
2129
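/**
 * fm10k_handle_resume - common tail of the resume and recovery paths
 * @interface: host interface private structure
 *
 * Rebinds hardware statistics, runs the reset handshake, marks the host
 * as not ready until the switch responds, and re-enables the service
 * task. Called from the suspend/resume and PCI error handlers below.
 **/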
2130 static int fm10k_handle_resume(struct fm10k_intfc *interface)
2131 {
2132 struct fm10k_hw *hw = &interface->hw;
2133 int err;
2134
2135 /* reset statistics starting values */
2136 hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
2137
2138 err = fm10k_handle_reset(interface);
2139 if (err)
2140 return err;
2141
2142 	/* assume the host is not ready, to prevent a race with the watchdog
2143 	 * in case we actually don't have a connection to the switch
2144 */
2145 interface->host_ready = false;
2146 fm10k_watchdog_host_not_ready(interface);
2147
2148 /* clear the service task disable bit to allow service task to start */
2149 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
2150 fm10k_service_event_schedule(interface);
2151
2152 return err;
2153 }
2154
2155 #ifdef CONFIG_PM
2156 /**
2157 * fm10k_resume - Restore device to pre-sleep state
2158 * @pdev: PCI device information struct
2159 *
2160 * fm10k_resume is called after the system has powered back up from a sleep
2161 * state and is ready to resume operation. This function is meant to restore
2162 * the device back to its pre-sleep state.
2163 **/
2164 static int fm10k_resume(struct pci_dev *pdev)
2165 {
2166 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2167 struct net_device *netdev = interface->netdev;
2168 struct fm10k_hw *hw = &interface->hw;
2169 	int err;
2170
2171 pci_set_power_state(pdev, PCI_D0);
2172 pci_restore_state(pdev);
2173
2174 /* pci_restore_state clears dev->state_saved so call
2175 * pci_save_state to restore it.
2176 */
2177 pci_save_state(pdev);
2178
2179 err = pci_enable_device_mem(pdev);
2180 if (err) {
2181 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2182 return err;
2183 }
2184 pci_set_master(pdev);
2185
2186 pci_wake_from_d3(pdev, false);
2187
2188 /* refresh hw_addr in case it was dropped */
2189 hw->hw_addr = interface->uc_addr;
2190
2191 err = fm10k_handle_resume(interface);
2192 if (err)
2193 return err;
2194
2195 netif_device_attach(netdev);
2196
2197 return 0;
2198 }
2199
2200 /**
2201 * fm10k_suspend - Prepare the device for a system sleep state
2202 * @pdev: PCI device information struct
2203 *
2204  * fm10k_suspend is meant to shut down the device prior to the system entering
2205 * a sleep state. The fm10k hardware does not support wake on lan so the
2206 * driver simply needs to shut down the device so it is in a low power state.
2207 **/
2208 static int fm10k_suspend(struct pci_dev *pdev,
2209 pm_message_t __always_unused state)
2210 {
2211 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2212 struct net_device *netdev = interface->netdev;
2213 int err = 0;
2214
2215 netif_device_detach(netdev);
2216
2217 fm10k_prepare_suspend(interface);
2218
2219 err = pci_save_state(pdev);
2220 if (err)
2221 return err;
2222
2223 pci_disable_device(pdev);
2224 pci_wake_from_d3(pdev, false);
2225 pci_set_power_state(pdev, PCI_D3hot);
2226
2227 return 0;
2228 }
2229
2230 #endif /* CONFIG_PM */
2231 /**
2232 * fm10k_io_error_detected - called when PCI error is detected
2233 * @pdev: Pointer to PCI device
2234 * @state: The current pci connection state
2235 *
2236 * This function is called after a PCI bus error affecting
2237 * this device has been detected.
2238 */
2239 static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
2240 pci_channel_state_t state)
2241 {
2242 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2243 struct net_device *netdev = interface->netdev;
2244
2245 netif_device_detach(netdev);
2246
2247 if (state == pci_channel_io_perm_failure)
2248 return PCI_ERS_RESULT_DISCONNECT;
2249
2250 fm10k_prepare_suspend(interface);
2251
2252 /* Request a slot reset. */
2253 return PCI_ERS_RESULT_NEED_RESET;
2254 }
2255
2256 /**
2257 * fm10k_io_slot_reset - called after the pci bus has been reset.
2258 * @pdev: Pointer to PCI device
2259 *
2260  * Restart the card from scratch, as if from a cold boot.
2261 */
2262 static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
2263 {
2264 pci_ers_result_t result;
2265
2266 if (pci_enable_device_mem(pdev)) {
2267 dev_err(&pdev->dev,
2268 "Cannot re-enable PCI device after reset.\n");
2269 result = PCI_ERS_RESULT_DISCONNECT;
2270 } else {
2271 pci_set_master(pdev);
2272 pci_restore_state(pdev);
2273
2274 		/* After a second error, pdev->state_saved is false; calling
2275 		 * pci_save_state() again restores it so EEH recovery doesn't break.
2276 		 */
2277 pci_save_state(pdev);
2278
2279 pci_wake_from_d3(pdev, false);
2280
2281 result = PCI_ERS_RESULT_RECOVERED;
2282 }
2283
2284 pci_cleanup_aer_uncorrect_error_status(pdev);
2285
2286 return result;
2287 }
2288
2289 /**
2290 * fm10k_io_resume - called when traffic can start flowing again.
2291 * @pdev: Pointer to PCI device
2292 *
2293 * This callback is called when the error recovery driver tells us that
2294  * it's OK to resume normal operation.
2295 */
2296 static void fm10k_io_resume(struct pci_dev *pdev)
2297 {
2298 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2299 struct net_device *netdev = interface->netdev;
2300 int err;
2301
2302 err = fm10k_handle_resume(interface);
2303
2304 if (err)
2305 dev_warn(&pdev->dev,
2306 "fm10k_io_resume failed: %d\n", err);
2307 else
2308 netif_device_attach(netdev);
2309 }
2310
2311 /**
2312 * fm10k_io_reset_notify - called when PCI function is reset
2313 * @pdev: Pointer to PCI device
2314 *
2315 * This callback is called when the PCI function is reset such as from
2316 * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
2317 * means we should prepare for a function reset. If prepare is false, it means
2318 * the function reset just occurred.
2319 */
2320 static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
2321 {
2322 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2323 int err = 0;
2324
2325 if (prepare) {
2326 		/* warn in case we have any active VF devices */
2327 if (pci_num_vf(pdev))
2328 dev_warn(&pdev->dev,
2329 "PCIe FLR may cause issues for any active VF devices\n");
2330
2331 fm10k_prepare_suspend(interface);
2332 } else {
2333 err = fm10k_handle_resume(interface);
2334 }
2335
2336 if (err) {
2337 dev_warn(&pdev->dev,
2338 "fm10k_io_reset_notify failed: %d\n", err);
2339 netif_device_detach(interface->netdev);
2340 }
2341 }
2342
2343 static const struct pci_error_handlers fm10k_err_handler = {
2344 .error_detected = fm10k_io_error_detected,
2345 .slot_reset = fm10k_io_slot_reset,
2346 .resume = fm10k_io_resume,
2347 .reset_notify = fm10k_io_reset_notify,
2348 };
2349
2350 static struct pci_driver fm10k_driver = {
2351 .name = fm10k_driver_name,
2352 .id_table = fm10k_pci_tbl,
2353 .probe = fm10k_probe,
2354 .remove = fm10k_remove,
2355 #ifdef CONFIG_PM
2356 .suspend = fm10k_suspend,
2357 .resume = fm10k_resume,
2358 #endif
2359 .sriov_configure = fm10k_iov_configure,
2360 .err_handler = &fm10k_err_handler
2361 };
2362
2363 /**
2364 * fm10k_register_pci_driver - register driver interface
2365 *
2366 * This function is called on module load in order to register the driver.
2367 **/
2368 int fm10k_register_pci_driver(void)
2369 {
2370 return pci_register_driver(&fm10k_driver);
2371 }
2372
2373 /**
2374 * fm10k_unregister_pci_driver - unregister driver interface
2375 *
2376 * This function is called on module unload in order to remove the driver.
2377 **/
2378 void fm10k_unregister_pci_driver(void)
2379 {
2380 pci_unregister_driver(&fm10k_driver);
2381 }
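/* Minimal usage sketch (illustrative only, not part of this file): the two
 * helpers above are intended to be called from the module entry points,
 * which live elsewhere in the driver (fm10k_main.c). Assuming the usual
 * module_init/module_exit pairing, the wiring looks roughly like:
 *
 *	static int __init fm10k_init_module(void)
 *	{
 *		return fm10k_register_pci_driver();
 *	}
 *	module_init(fm10k_init_module);
 *
 *	static void __exit fm10k_exit_module(void)
 *	{
 *		fm10k_unregister_pci_driver();
 *	}
 *	module_exit(fm10k_exit_module);
 */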