fm10k: only show actual queues, not the maximum in hardware
[deliverable/linux.git] / drivers / net / ethernet / intel / fm10k / fm10k_pci.c
CommitLineData
b3890e30 1/* Intel Ethernet Switch Host Interface Driver
97c71e3c 2 * Copyright(c) 2013 - 2015 Intel Corporation.
b3890e30
AD
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 */
20
21#include <linux/module.h>
19ae1b3f 22#include <linux/aer.h>
b3890e30
AD
23
24#include "fm10k.h"
25
/* lookup table mapping the device type (PCI table driver_data) to the
 * MAC-specific info structure for that function (PF vs VF)
 */
static const struct fm10k_info *fm10k_info_tbl[] = {
	[fm10k_device_pf] = &fm10k_pf_info,
	[fm10k_device_vf] = &fm10k_vf_info,
};
30
/**
 * fm10k_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id fm10k_pci_tbl[] = {
	/* driver_data selects the fm10k_info_tbl entry for this device */
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
	{ PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
	/* required last entry */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl);
47
04a5aefb
AD
48u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg)
49{
50 struct fm10k_intfc *interface = hw->back;
51 u16 value = 0;
52
53 if (FM10K_REMOVED(hw->hw_addr))
54 return ~value;
55
56 pci_read_config_word(interface->pdev, reg, &value);
57 if (value == 0xFFFF)
58 fm10k_write_flush(hw);
59
60 return value;
61}
62
/**
 * fm10k_read_reg - read a device register, detecting surprise removal
 * @hw: pointer to the hardware structure
 * @reg: register offset (in 32-bit words)
 *
 * Returns the register value, or all-ones if the device is gone. When a
 * read returns all-ones and register 0 confirms it (or reg was 0), the
 * PCIe link is considered lost: hw_addr is cleared and the netdev is
 * detached so no further MMIO is attempted.
 **/
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg)
{
	/* snapshot hw_addr once so a concurrent detach cannot change it
	 * between the removal check and the readl()
	 */
	u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (FM10K_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);
	/* all-ones is either a legitimate value or a dead device; confirm
	 * against register 0 before declaring the link lost
	 */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct fm10k_intfc *interface = hw->back;
		struct net_device *netdev = interface->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}
83
0e7b3644
AD
84static int fm10k_hw_ready(struct fm10k_intfc *interface)
85{
86 struct fm10k_hw *hw = &interface->hw;
87
88 fm10k_write_flush(hw);
89
90 return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
91}
92
/**
 * fm10k_service_event_schedule - schedule the service task to run
 * @interface: board private structure
 *
 * Queues the service task unless servicing is disabled or a run is
 * already pending; __FM10K_SERVICE_SCHED acts as the scheduling lock
 * released by fm10k_service_event_complete().
 **/
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
	if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) &&
	    !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state))
		schedule_work(&interface->service_task);
}
99
/**
 * fm10k_service_event_complete - release the service task scheduling lock
 * @interface: board private structure
 *
 * Must only be called from the service task itself, with
 * __FM10K_SERVICE_SCHED set.
 **/
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__FM10K_SERVICE_SCHED, &interface->state);
}
108
109/**
110 * fm10k_service_timer - Timer Call-back
111 * @data: pointer to interface cast into an unsigned long
112 **/
113static void fm10k_service_timer(unsigned long data)
114{
115 struct fm10k_intfc *interface = (struct fm10k_intfc *)data;
116
117 /* Reset the timer */
118 mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
119
120 fm10k_service_event_schedule(interface);
121}
122
123static void fm10k_detach_subtask(struct fm10k_intfc *interface)
124{
125 struct net_device *netdev = interface->netdev;
126
127 /* do nothing if device is still present or hw_addr is set */
128 if (netif_device_present(netdev) || interface->hw.hw_addr)
129 return;
130
131 rtnl_lock();
132
133 if (netif_running(netdev))
134 dev_close(netdev);
135
136 rtnl_unlock();
137}
138
/**
 * fm10k_reinit - soft reset and reinitialize the interface
 * @interface: board private structure
 *
 * Tears the interface down, resets the hardware to a known state, and
 * brings everything back up. The ordering (suspend IOV, close, free
 * mailbox IRQ, reset, re-request IRQ, reopen, resume IOV) is
 * deliberate and must be preserved. Serialized against other resets by
 * the __FM10K_RESETTING bit and performed under the RTNL lock.
 **/
static void fm10k_reinit(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	WARN_ON(in_interrupt());

	/* put off any impending NetWatchDogTimeout */
	netdev->trans_start = jiffies;

	/* wait for any reset already in progress to finish */
	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	rtnl_lock();

	fm10k_iov_suspend(interface->pdev);

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err)
		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);

	/* reassociate interrupts */
	fm10k_mbx_request_irq(interface);

	/* reset clock */
	fm10k_ts_reset(interface);

	if (netif_running(netdev))
		fm10k_open(netdev);

	fm10k_iov_resume(interface->pdev);

	rtnl_unlock();

	clear_bit(__FM10K_RESETTING, &interface->state);
}
185
/**
 * fm10k_reset_subtask - perform a requested interface reset
 * @interface: board private structure
 *
 * Consumes FM10K_FLAG_RESET_REQUESTED and reinitializes the interface
 * if it was set; otherwise does nothing.
 **/
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
	if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED))
		return;

	interface->flags &= ~FM10K_FLAG_RESET_REQUESTED;

	netdev_err(interface->netdev, "Reset interface\n");
	/* NOTE(review): reset is tallied against tx_timeout_count —
	 * presumably a Tx hang is the common trigger; confirm
	 */
	interface->tx_timeout_count++;

	fm10k_reinit(interface);
}
198
199/**
200 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
201 * @interface: board private structure
202 *
203 * Configure the SWPRI to PC mapping for the port.
204 **/
205static void fm10k_configure_swpri_map(struct fm10k_intfc *interface)
206{
207 struct net_device *netdev = interface->netdev;
208 struct fm10k_hw *hw = &interface->hw;
209 int i;
210
211 /* clear flag indicating update is needed */
212 interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG;
213
214 /* these registers are only available on the PF */
215 if (hw->mac.type != fm10k_mac_pf)
216 return;
217
218 /* configure SWPRI to PC map */
219 for (i = 0; i < FM10K_SWPRI_MAX; i++)
220 fm10k_write_reg(hw, FM10K_SWPRI_MAP(i),
221 netdev_get_prio_tc_map(netdev, i));
222}
223
/**
 * fm10k_watchdog_update_host_state - Update the link status based on host.
 * @interface: board private structure
 *
 * Queries the switch manager for host readiness. While a forced
 * link-down event is pending the host is reported not ready. Also
 * applies a deferred SWPRI map update if one was requested. If the
 * query fails after the reset hold-off time has passed, a reset is
 * requested.
 **/
static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	if (test_bit(__FM10K_LINK_DOWN, &interface->state)) {
		interface->host_ready = false;
		/* keep reporting link down until link_down_event expires */
		if (time_is_after_jiffies(interface->link_down_event))
			return;
		clear_bit(__FM10K_LINK_DOWN, &interface->state);
	}

	if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) {
		/* trylock only: if RTNL is contended the flag stays set and
		 * a later watchdog pass will retry
		 */
		if (rtnl_trylock()) {
			fm10k_configure_swpri_map(interface);
			rtnl_unlock();
		}
	}

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	err = hw->mac.ops.get_host_state(hw, &interface->host_ready);
	if (err && time_is_before_jiffies(interface->last_reset))
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	/* free the lock */
	fm10k_mbx_unlock(interface);
}
257
/**
 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
 * @interface: board private structure
 *
 * This function will process both the upstream and downstream mailboxes.
 * It is necessary for us to hold the rtnl_lock while doing this as the
 * mailbox accesses are protected by this lock.
 **/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
	/* process upstream mailbox and update device state */
	fm10k_watchdog_update_host_state(interface);

	/* process downstream mailboxes (VF requests on a PF) */
	fm10k_iov_mbx(interface);
}
274
275/**
276 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
277 * @interface: board private structure
278 **/
279static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface)
280{
281 struct net_device *netdev = interface->netdev;
282
283 /* only continue if link state is currently down */
284 if (netif_carrier_ok(netdev))
285 return;
286
287 netif_info(interface, drv, netdev, "NIC Link is up\n");
288
289 netif_carrier_on(netdev);
290 netif_tx_wake_all_queues(netdev);
291}
292
293/**
294 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
295 * @interface: board private structure
296 **/
297static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface)
298{
299 struct net_device *netdev = interface->netdev;
300
301 /* only continue if link state is currently up */
302 if (!netif_carrier_ok(netdev))
303 return;
304
305 netif_info(interface, drv, netdev, "NIC Link is down\n");
306
307 netif_carrier_off(netdev);
308 netif_tx_stop_all_queues(netdev);
309}
310
311/**
312 * fm10k_update_stats - Update the board statistics counters.
313 * @interface: board private structure
314 **/
315void fm10k_update_stats(struct fm10k_intfc *interface)
316{
317 struct net_device_stats *net_stats = &interface->netdev->stats;
318 struct fm10k_hw *hw = &interface->hw;
319 u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
320 u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
321 u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
322 u64 tx_bytes_nic = 0, tx_pkts_nic = 0;
323 u64 bytes, pkts;
324 int i;
325
326 /* do not allow stats update via service task for next second */
327 interface->next_stats_update = jiffies + HZ;
328
329 /* gather some stats to the interface struct that are per queue */
330 for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
331 struct fm10k_ring *tx_ring = interface->tx_ring[i];
332
333 restart_queue += tx_ring->tx_stats.restart_queue;
334 tx_busy += tx_ring->tx_stats.tx_busy;
335 tx_csum_errors += tx_ring->tx_stats.csum_err;
336 bytes += tx_ring->stats.bytes;
337 pkts += tx_ring->stats.packets;
338 }
339
340 interface->restart_queue = restart_queue;
341 interface->tx_busy = tx_busy;
342 net_stats->tx_bytes = bytes;
343 net_stats->tx_packets = pkts;
344 interface->tx_csum_errors = tx_csum_errors;
345 /* gather some stats to the interface struct that are per queue */
346 for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
347 struct fm10k_ring *rx_ring = interface->rx_ring[i];
348
349 bytes += rx_ring->stats.bytes;
350 pkts += rx_ring->stats.packets;
351 alloc_failed += rx_ring->rx_stats.alloc_failed;
352 rx_csum_errors += rx_ring->rx_stats.csum_err;
353 rx_errors += rx_ring->rx_stats.errors;
354 }
355
356 net_stats->rx_bytes = bytes;
357 net_stats->rx_packets = pkts;
358 interface->alloc_failed = alloc_failed;
359 interface->rx_csum_errors = rx_csum_errors;
b7d8514c
AD
360
361 hw->mac.ops.update_hw_stats(hw, &interface->stats);
362
363 for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
364 struct fm10k_hw_stats_q *q = &interface->stats.q[i];
365
366 tx_bytes_nic += q->tx_bytes.count;
367 tx_pkts_nic += q->tx_packets.count;
368 rx_bytes_nic += q->rx_bytes.count;
369 rx_pkts_nic += q->rx_packets.count;
370 rx_drops_nic += q->rx_drops.count;
371 }
372
373 interface->tx_bytes_nic = tx_bytes_nic;
374 interface->tx_packets_nic = tx_pkts_nic;
375 interface->rx_bytes_nic = rx_bytes_nic;
376 interface->rx_packets_nic = rx_pkts_nic;
377 interface->rx_drops_nic = rx_drops_nic;
378
379 /* Fill out the OS statistics structure */
97c71e3c 380 net_stats->rx_errors = rx_errors;
b7d8514c
AD
381 net_stats->rx_dropped = interface->stats.nodesc_drop.count;
382}
383
384/**
385 * fm10k_watchdog_flush_tx - flush queues on host not ready
386 * @interface - pointer to the device interface structure
387 **/
388static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
389{
390 int some_tx_pending = 0;
391 int i;
392
393 /* nothing to do if carrier is up */
394 if (netif_carrier_ok(interface->netdev))
395 return;
396
397 for (i = 0; i < interface->num_tx_queues; i++) {
398 struct fm10k_ring *tx_ring = interface->tx_ring[i];
399
400 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
401 some_tx_pending = 1;
402 break;
403 }
404 }
405
406 /* We've lost link, so the controller stops DMA, but we've got
407 * queued Tx work that's never going to get done, so reset
408 * controller to flush Tx.
409 */
410 if (some_tx_pending)
411 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
412}
413
414/**
415 * fm10k_watchdog_subtask - check and bring link up
416 * @interface - pointer to the device interface structure
417 **/
418static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
419{
420 /* if interface is down do nothing */
421 if (test_bit(__FM10K_DOWN, &interface->state) ||
422 test_bit(__FM10K_RESETTING, &interface->state))
423 return;
424
425 if (interface->host_ready)
426 fm10k_watchdog_host_is_ready(interface);
427 else
428 fm10k_watchdog_host_not_ready(interface);
429
430 /* update stats only once every second */
431 if (time_is_before_jiffies(interface->next_stats_update))
432 fm10k_update_stats(interface);
433
434 /* flush any uncompleted work */
435 fm10k_watchdog_flush_tx(interface);
436}
437
/**
 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
 * @interface - pointer to the device interface structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__FM10K_DOWN, &interface->state) ||
	    test_bit(__FM10K_RESETTING, &interface->state))
		return;

	/* rate limit tx hang checks to only once every 2 seconds */
	if (time_is_after_eq_jiffies(interface->next_tx_hang_check))
		return;
	interface->next_tx_hang_check = jiffies + (2 * HZ);

	if (netif_carrier_ok(interface->netdev)) {
		/* Force detection of hung controller */
		for (i = 0; i < interface->num_tx_queues; i++)
			set_check_for_tx_hang(interface->tx_ring[i]);

		/* Rearm all in-use q_vectors for immediate firing; vectors
		 * with no rings mapped are skipped
		 */
		for (i = 0; i < interface->num_q_vectors; i++) {
			struct fm10k_q_vector *qv = interface->q_vector[i];

			if (!qv->tx.count && !qv->rx.count)
				continue;
			writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr);
		}
	}
}
476
/**
 * fm10k_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 *
 * Workqueue entry point for all deferred driver housekeeping; scheduled
 * by fm10k_service_event_schedule() and the service timer.
 **/
static void fm10k_service_task(struct work_struct *work)
{
	struct fm10k_intfc *interface;

	interface = container_of(work, struct fm10k_intfc, service_task);

	/* tasks always capable of running, but must be rtnl protected */
	fm10k_mbx_subtask(interface);
	fm10k_detach_subtask(interface);
	fm10k_reset_subtask(interface);

	/* tasks only run when interface is up */
	fm10k_watchdog_subtask(interface);
	fm10k_check_hang_subtask(interface);
	fm10k_ts_tx_subtask(interface);

	/* release lock on service events to allow scheduling next event */
	fm10k_service_event_complete(interface);
}
500
/**
 * fm10k_configure_tx_ring - Configure Tx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset. The register write
 * order matters: the queue is disabled first, then base/length and
 * head/tail are programmed, and the queue is enabled last.
 **/
static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	u64 tdba = ring->dma;
	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
	u32 txint = FM10K_INT_MAP_DISABLE;
	u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32);
	fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* Map interrupt */
	if (ring->q_vector) {
		txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		txint |= FM10K_INT_MAP_TIMER0;
	}

	fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint);

	/* enable use of FTAG bit in Tx descriptor, register is RO for VF */
	fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
			FM10K_PFVTCTL_FTAG_DESC_ENABLE);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
}
555
/**
 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Verify the Tx descriptor ring is ready for transmit: poll TXDCTL
 * until the enable bit reads back set, giving up (with an error log)
 * after roughly 10-20ms.
 **/
static void fm10k_enable_tx_ring(struct fm10k_intfc *interface,
				 struct fm10k_ring *ring)
{
	struct fm10k_hw *hw = &interface->hw;
	int wait_loop = 10;
	u32 txdctl;
	u8 reg_idx = ring->reg_idx;

	/* if we are already enabled just exit */
	if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE)
		return;

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx));
	} while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop);
	if (!wait_loop)
		netif_err(interface, drv, interface->netdev,
			  "Could not enable Tx Queue %d\n", reg_idx);
}
584
585/**
586 * fm10k_configure_tx - Configure Transmit Unit after Reset
587 * @interface: board private structure
588 *
589 * Configure the Tx unit of the MAC after a reset.
590 **/
591static void fm10k_configure_tx(struct fm10k_intfc *interface)
592{
593 int i;
594
595 /* Setup the HW Tx Head and Tail descriptor pointers */
596 for (i = 0; i < interface->num_tx_queues; i++)
597 fm10k_configure_tx_ring(interface, interface->tx_ring[i]);
598
599 /* poll here to verify that Tx rings are now enabled */
600 for (i = 0; i < interface->num_tx_queues; i++)
601 fm10k_enable_tx_ring(interface, interface->tx_ring[i]);
602}
603
/**
 * fm10k_configure_rx_ring - Configure Rx ring after Reset
 * @interface: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset. As with Tx, ordering
 * matters: disable, program base/length and head/tail, then enable and
 * finally refill the ring with receive buffers.
 **/
static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
				    struct fm10k_ring *ring)
{
	u64 rdba = ring->dma;
	struct fm10k_hw *hw = &interface->hw;
	u32 size = ring->count * sizeof(union fm10k_rx_desc);
	u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF;
	u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN;
	u32 rxint = FM10K_INT_MAP_DISABLE;
	u8 rx_pause = interface->rx_pause;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0);
	fm10k_write_flush(hw);

	/* possible poll here to verify ring resources have been cleaned */

	/* set location and size for descriptor ring */
	fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32);
	fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size);

	/* reset head and tail pointers */
	fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0);
	fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0);

	/* store tail pointer */
	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	/* Configure the Rx buffer size for one buff without split */
	srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT;

	/* Configure the Rx ring to suppress loopback packets */
	srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS;
	fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl);

	/* Enable drop on empty; with PFC enabled the per-PC enable bits
	 * take the place of the global rx_pause state
	 */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;
#endif
	if (!(rx_pause & (1 << ring->qos_pc)))
		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);

	/* assign default VLAN to queue */
	ring->vid = hw->mac.default_vid;

	/* Map interrupt */
	if (ring->q_vector) {
		rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
		rxint |= FM10K_INT_MAP_TIMER1;
	}

	fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint);

	/* enable queue */
	fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl);

	/* place buffers on ring for receive data */
	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
}
681
/**
 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
 * @interface: board private structure
 *
 * Configure the drop enable bits for the Rx rings: a ring drops on
 * empty unless pause is enabled for its priority class.
 **/
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u8 rx_pause = interface->rx_pause;
	int i;

	/* with PFC enabled the per-PC enable bits replace global pause */
#ifdef CONFIG_DCB
	if (interface->pfc_en)
		rx_pause = interface->pfc_en;

#endif
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];
		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
		u8 reg_idx = ring->reg_idx;

		if (!(rx_pause & (1 << ring->qos_pc)))
			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;

		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
	}
}
710
/**
 * fm10k_configure_dglort - Configure Receive DGLORT after reset
 * @interface: board private structure
 *
 * Configure the DGLORT description and RSS tables: hash seeds, RETA,
 * hash-field selection, and the glort-to-queue mappings for this
 * interface (plus a queue-mapped test range when enough glorts exist).
 **/
static void fm10k_configure_dglort(struct fm10k_intfc *interface)
{
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u32 mrqc;

	/* Fill out hash function seeds */
	for (i = 0; i < FM10K_RSSRK_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]);

	/* Write RETA table to hardware */
	for (i = 0; i < FM10K_RETA_SIZE; i++)
		fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = FM10K_MRQC_IPV4 |
	       FM10K_MRQC_TCP_IPV4 |
	       FM10K_MRQC_IPV6 |
	       FM10K_MRQC_TCP_IPV6;

	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV4;
	if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= FM10K_MRQC_UDP_IPV6;

	fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign GLORT per queue for queue mapped testing */
	if (interface->glort_count > 64) {
		memset(&dglort, 0, sizeof(dglort));
		dglort.inner_rss = 1;
		dglort.glort = interface->glort + 64;
		dglort.idx = fm10k_dglort_pf_queue;
		dglort.queue_l = fls(interface->num_rx_queues - 1);
		hw->mac.ops.configure_dglort_map(hw, &dglort);
	}

	/* assign glort value for RSS/DCB specific to this interface */
	memset(&dglort, 0, sizeof(dglort));
	dglort.inner_rss = 1;
	dglort.glort = interface->glort;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	/* configure DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	if (interface->l2_accel)
		dglort.shared_l = fls(interface->l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);
}
775
776/**
777 * fm10k_configure_rx - Configure Receive Unit after Reset
778 * @interface: board private structure
779 *
780 * Configure the Rx unit of the MAC after a reset.
781 **/
782static void fm10k_configure_rx(struct fm10k_intfc *interface)
783{
784 int i;
785
786 /* Configure SWPRI to PC map */
787 fm10k_configure_swpri_map(interface);
788
789 /* Configure RSS and DGLORT map */
790 fm10k_configure_dglort(interface);
791
792 /* Setup the HW Rx Head and Tail descriptor pointers */
793 for (i = 0; i < interface->num_rx_queues; i++)
794 fm10k_configure_rx_ring(interface, interface->rx_ring[i]);
795
796 /* possible poll here to verify that Rx rings are now enabled */
797}
798
18283cad
AD
799static void fm10k_napi_enable_all(struct fm10k_intfc *interface)
800{
801 struct fm10k_q_vector *q_vector;
802 int q_idx;
803
804 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
805 q_vector = interface->q_vector[q_idx];
806 napi_enable(&q_vector->napi);
807 }
808}
809
de445199 810static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data)
18283cad
AD
811{
812 struct fm10k_q_vector *q_vector = data;
813
814 if (q_vector->rx.count || q_vector->tx.count)
815 napi_schedule(&q_vector->napi);
816
817 return IRQ_HANDLED;
818}
819
/**
 * fm10k_msix_mbx_vf - VF mailbox interrupt handler
 * @irq: unused interrupt number
 * @data: pointer to the interface structure
 *
 * Re-enables the mailbox interrupt, processes the upstream mailbox if
 * the mailbox lock can be taken, and schedules the service task to
 * revalidate host state.
 **/
static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY);

	/* service upstream mailbox; if contended the other holder will
	 * process the messages
	 */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_mbx_unlock(interface);
	}

	hw->mac.get_host_state = 1;
	fm10k_service_event_schedule(interface);

	return IRQ_HANDLED;
}
841
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fm10k_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
void fm10k_netpoll(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__FM10K_DOWN, &interface->state))
		return;

	/* invoke each queue vector's handler directly (irq number unused) */
	for (i = 0; i < interface->num_q_vectors; i++)
		fm10k_msix_clean_rings(0, interface->q_vector[i]);
}

#endif
18283cad
AD
/* expand a fault enum value into a "case X: error = "X"; break" arm */
#define FM10K_ERR_MSG(type) case (type): error = #type; break

/**
 * fm10k_print_fault - log a decoded hardware fault
 * @interface: board private structure
 * @type: fault register group (PCA/THI/FUM)
 * @fault: fault descriptor read from hardware
 *
 * Translates the fault type into its symbolic name and logs it along
 * with the fault address, specinfo, and the PCI function it came from.
 **/
static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
			      struct fm10k_fault *fault)
{
	struct pci_dev *pdev = interface->pdev;
	char *error;

	switch (type) {
	case FM10K_PCA_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown PCA error";
			break;
		FM10K_ERR_MSG(PCA_NO_FAULT);
		FM10K_ERR_MSG(PCA_UNMAPPED_ADDR);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_PF);
		FM10K_ERR_MSG(PCA_BAD_QACCESS_VF);
		FM10K_ERR_MSG(PCA_MALICIOUS_REQ);
		FM10K_ERR_MSG(PCA_POISONED_TLP);
		FM10K_ERR_MSG(PCA_TLP_ABORT);
		}
		break;
	case FM10K_THI_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown THI error";
			break;
		FM10K_ERR_MSG(THI_NO_FAULT);
		FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT);
		}
		break;
	case FM10K_FUM_FAULT:
		switch (fault->type) {
		default:
			error = "Unknown FUM error";
			break;
		FM10K_ERR_MSG(FUM_NO_FAULT);
		FM10K_ERR_MSG(FUM_UNMAPPED_ADDR);
		FM10K_ERR_MSG(FUM_BAD_VF_QACCESS);
		FM10K_ERR_MSG(FUM_ADD_DECODE_ERR);
		FM10K_ERR_MSG(FUM_RO_ERROR);
		FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR);
		FM10K_ERR_MSG(FUM_CSR_TIMEOUT);
		FM10K_ERR_MSG(FUM_INVALID_TYPE);
		FM10K_ERR_MSG(FUM_INVALID_LENGTH);
		FM10K_ERR_MSG(FUM_INVALID_BE);
		FM10K_ERR_MSG(FUM_INVALID_ALIGN);
		}
		break;
	default:
		error = "Undocumented fault";
		break;
	}

	dev_warn(&pdev->dev,
		 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
		 error, fault->address, fault->specinfo,
		 PCI_SLOT(fault->func), PCI_FUNC(fault->func));
}
923
/**
 * fm10k_report_fault - read and log any faults flagged in EICR
 * @interface: board private structure
 * @eicr: interrupt cause register value
 *
 * Walks the fault bits in @eicr (each bit advancing the fault register
 * offset by FM10K_FAULT_SIZE), retrieves the corresponding fault record
 * from hardware, and logs it.
 **/
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_fault fault = { 0 };
	int type, err;

	for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT;
	     eicr;
	     eicr >>= 1, type += FM10K_FAULT_SIZE) {
		/* only check if there is an error reported */
		if (!(eicr & 0x1))
			continue;

		/* retrieve fault info */
		err = hw->mac.ops.get_fault(hw, type, &fault);
		if (err) {
			dev_err(&interface->pdev->dev,
				"error reading fault\n");
			continue;
		}

		fm10k_print_fault(interface, type, &fault);
	}
}
948
/**
 * fm10k_reset_drop_on_empty - re-enable Rx queues disabled by overrun
 * @interface: board private structure
 * @eicr: interrupt cause register value
 *
 * When MAXHOLDTIME fires, scan the MAXHOLDQ registers from the top
 * queue (255) downward; any queue flagged gets its RXDCTL rewritten to
 * re-enable write-back, and the overrun counters are updated (PF vs VF
 * based on the queue index).
 **/
static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
{
	struct fm10k_hw *hw = &interface->hw;
	const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
	u32 maxholdq;
	int q;

	if (!(eicr & FM10K_EICR_MAXHOLDTIME))
		return;

	/* reading MAXHOLDQ gives the flagged queues; writing the value
	 * back clears those bits (write-1-to-clear)
	 */
	maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7));
	if (maxholdq)
		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
	/* walk queues 255..0; bit 31 of the shifting word always
	 * corresponds to the current queue q
	 */
	for (q = 255;;) {
		if (maxholdq & (1 << 31)) {
			if (q < FM10K_MAX_QUEUES_PF) {
				interface->rx_overrun_pf++;
				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
			} else {
				interface->rx_overrun_vf++;
			}
		}

		/* shift the next queue's bit into position 31 */
		maxholdq *= 2;
		if (!maxholdq)
			/* word exhausted: skip ahead to the next register
			 * boundary (multiple of 32)
			 */
			q &= ~(32 - 1);

		if (!q)
			break;

		if (q-- % 32)
			continue;

		/* crossed a 32-queue boundary: load the next register */
		maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
		if (maxholdq)
			fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
	}
}
987
/**
 * fm10k_msix_mbx_pf - PF mailbox/miscellaneous interrupt handler
 * @irq: unused interrupt number
 * @data: pointer to the interface structure
 *
 * Acknowledges the mailbox/switch-state causes, logs any hardware
 * faults, recovers Rx queues disabled by receiver overrun, services
 * the PF and VF mailboxes, and forces link down for 4 seconds when the
 * switch transitions to not-ready. Finishes by scheduling the service
 * task and re-enabling the mailbox interrupt.
 **/
static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
{
	struct fm10k_intfc *interface = data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 eicr;

	/* unmask any set bits related to this interrupt */
	eicr = fm10k_read_reg(hw, FM10K_EICR);
	fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX |
						FM10K_EICR_SWITCHREADY |
						FM10K_EICR_SWITCHNOTREADY));

	/* report any faults found to the message log */
	fm10k_report_fault(interface, eicr);

	/* reset any queues disabled due to receiver overrun */
	fm10k_reset_drop_on_empty(interface, eicr);

	/* service mailboxes */
	if (fm10k_mbx_trylock(interface)) {
		mbx->ops.process(hw, mbx);
		fm10k_iov_event(interface);
		fm10k_mbx_unlock(interface);
	}

	/* if switch toggled state we should reset GLORTs */
	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
		/* force link down for at least 4 seconds */
		interface->link_down_event = jiffies + (4 * HZ);
		set_bit(__FM10K_LINK_DOWN, &interface->state);

		/* reset dglort_map back to no config */
		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
	}

	/* we should validate host state after interrupt event */
	hw->mac.get_host_state = 1;
	fm10k_service_event_schedule(interface);

	/* re-enable mailbox interrupt and indicate 20us delay */
	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
			FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY);

	return IRQ_HANDLED;
}
1034
/**
 * fm10k_mbx_free_irq - disconnect mailbox and release its MSI-X vector
 * @interface: board private structure
 *
 * Disconnects the mailbox from its peer, masks the related interrupt
 * causes (PF only), masks the mailbox vector's ITR register and frees
 * the IRQ line.
 **/
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct fm10k_hw *hw = &interface->hw;
	int itr_reg;

	/* disconnect the mailbox */
	hw->mbx.ops.disconnect(hw, &hw->mbx);

	/* disable Mailbox cause */
	if (hw->mac.type == fm10k_mac_pf) {
		/* PF: mask every cause routed to the mailbox vector */
		fm10k_write_reg(hw, FM10K_EIMR,
				FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR) |
				FM10K_EIMR_DISABLE(MAXHOLDTIME));
		itr_reg = FM10K_ITR(FM10K_MBX_VECTOR);
	} else {
		/* VF: only the vector's ITR register needs masking */
		itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR);
	}

	/* mask the interrupt throttle register for this vector */
	fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET);

	free_irq(entry->vector, interface);
}
1064
5cb8db4a
AD
1065static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
1066 struct fm10k_mbx_info *mbx)
1067{
1068 bool vlan_override = hw->mac.vlan_override;
1069 u16 default_vid = hw->mac.default_vid;
1070 struct fm10k_intfc *interface;
1071 s32 err;
1072
1073 err = fm10k_msg_mac_vlan_vf(hw, results, mbx);
1074 if (err)
1075 return err;
1076
1077 interface = container_of(hw, struct fm10k_intfc, hw);
1078
1079 /* MAC was changed so we need reset */
1080 if (is_valid_ether_addr(hw->mac.perm_addr) &&
1081 memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN))
1082 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1083
1084 /* VLAN override was changed, or default VLAN changed */
1085 if ((vlan_override != hw->mac.vlan_override) ||
1086 (default_vid != hw->mac.default_vid))
1087 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1088
1089 return 0;
1090}
1091
a211e013 1092static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
de445199 1093 struct fm10k_mbx_info __always_unused *mbx)
a211e013
AD
1094{
1095 struct fm10k_intfc *interface;
1096 u64 timestamp;
1097 s32 err;
1098
1099 err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
1100 &timestamp);
1101 if (err)
1102 return err;
1103
1104 interface = container_of(hw, struct fm10k_intfc, hw);
1105
1106 fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
1107
1108 return 0;
1109}
1110
18283cad
AD
1111/* generic error handler for mailbox issues */
1112static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
de445199 1113 struct fm10k_mbx_info __always_unused *mbx)
18283cad
AD
1114{
1115 struct fm10k_intfc *interface;
1116 struct pci_dev *pdev;
1117
1118 interface = container_of(hw, struct fm10k_intfc, hw);
1119 pdev = interface->pdev;
1120
1121 dev_err(&pdev->dev, "Unknown message ID %u\n",
1122 **results & FM10K_TLV_ID_MASK);
1123
1124 return 0;
1125}
1126
5cb8db4a
AD
/* dispatch table for mailbox messages received by the VF; the error
 * handler terminates the table and catches any unknown message ID
 */
static const struct fm10k_msg_data vf_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
1134
/**
 * fm10k_mbx_request_irq_vf - register VF mailbox handlers and request IRQ
 * @interface: board private structure
 *
 * Registers the VF mailbox dispatch table, requests the mailbox MSI-X
 * vector, maps all interrupt sources to it and enables the vector.
 *
 * Returns 0 on success, negative error code otherwise.
 **/
static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* map all of the interrupt sources */
	fm10k_write_reg(hw, FM10K_VFINT_MAP, itr);

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
1167
18283cad
AD
1168static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
1169 struct fm10k_mbx_info *mbx)
1170{
1171 struct fm10k_intfc *interface;
1172 u32 dglort_map = hw->mac.dglort_map;
1173 s32 err;
1174
1175 err = fm10k_msg_lport_map_pf(hw, results, mbx);
1176 if (err)
1177 return err;
1178
1179 interface = container_of(hw, struct fm10k_intfc, hw);
1180
1181 /* we need to reset if port count was just updated */
1182 if (dglort_map != hw->mac.dglort_map)
1183 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1184
1185 return 0;
1186}
1187
/**
 * fm10k_update_pvid - PF handler for port VLAN update messages
 * @hw: pointer to hardware structure
 * @results: parsed TLV attributes from the message
 * @mbx: unused mailbox information structure
 *
 * Validates the glort/pvid pair, forwards VF-owned updates to the IOV
 * code, and otherwise records the new default VLAN for the PF itself,
 * requesting a reset if the value changed.
 **/
static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_intfc *interface;
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* check to see if this belongs to one of the VFs; a zero return
	 * means the IOV code consumed the update
	 */
	err = fm10k_iov_update_pvid(interface, glort, pvid);
	if (!err)
		return 0;

	/* we need to reset if default VLAN was just updated */
	if (pvid != hw->mac.default_vid)
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;

	hw->mac.default_vid = pvid;

	return 0;
}
1228
/**
 * fm10k_1588_msg_pf - PF handler for 1588 timestamp messages
 * @hw: pointer to hardware structure
 * @results: parsed TLV attributes from the message
 * @mbx: unused mailbox information structure
 *
 * Delivers the timestamp either to the PF's own Tx hardware-timestamp
 * path (dglort set, or sglort matching this interface) or forwards it to
 * the owning VF through its mailbox.
 **/
static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info __always_unused *mbx)
{
	struct fm10k_swapi_1588_timestamp timestamp;
	struct fm10k_iov_data *iov_data;
	struct fm10k_intfc *interface;
	u16 sglort, vf_idx;
	s32 err;

	err = fm10k_tlv_attr_get_le_struct(
				results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
				&timestamp, sizeof(timestamp));
	if (err)
		return err;

	interface = container_of(hw, struct fm10k_intfc, hw);

	/* a destination glort means this is an egress timestamp for us */
	if (timestamp.dglort) {
		fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
				     le64_to_cpu(timestamp.egress));
		return 0;
	}

	/* either dglort or sglort must be set */
	if (!timestamp.sglort)
		return FM10K_ERR_PARAM;

	/* verify GLORT is at least one of the ones we own */
	sglort = le16_to_cpu(timestamp.sglort);
	if (!fm10k_glort_valid_pf(hw, sglort))
		return FM10K_ERR_PARAM;

	if (sglort == interface->glort) {
		fm10k_ts_tx_hwtstamp(interface, 0,
				     le64_to_cpu(timestamp.ingress));
		return 0;
	}

	/* if there is no iov_data then there is no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return FM10K_ERR_PARAM;

	/* RCU protects iov_data against concurrent teardown */
	rcu_read_lock();

	/* notify VF if this timestamp belongs to it */
	iov_data = interface->iov_data;
	vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;

	/* re-check under the lock: iov_data may have been freed, or the
	 * computed index may be outside the allocated VF range
	 */
	if (!iov_data || vf_idx >= iov_data->num_vfs) {
		err = FM10K_ERR_PARAM;
		goto err_unlock;
	}

	err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
					   le64_to_cpu(timestamp.ingress));

err_unlock:
	rcu_read_unlock();

	return err;
}
1290
18283cad
AD
/* dispatch table for mailbox messages received by the PF from the switch
 * manager; the error handler terminates the table and catches any
 * unknown message ID
 */
static const struct fm10k_msg_data pf_mbx_data[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
	FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
1301
/**
 * fm10k_mbx_request_irq_pf - register PF mailbox handlers and request IRQ
 * @interface: board private structure
 *
 * Registers the PF mailbox dispatch table, requests the mailbox MSI-X
 * vector, routes all "other" causes to that vector and enables them.
 *
 * Returns 0 on success, negative error code otherwise.
 **/
static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
{
	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Use timer0 for interrupt moderation on the mailbox */
	u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
	u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;

	/* register mailbox handlers */
	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
	if (err)
		return err;

	/* request the IRQ */
	err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0,
			  dev->name, interface);
	if (err) {
		netif_err(interface, probe, dev,
			  "request_irq for msix_mbx failed: %d\n", err);
		return err;
	}

	/* Enable interrupts w/ no moderation for "other" interrupts */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr);
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr);

	/* Enable interrupts w/ moderation for mailbox */
	fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr);

	/* Enable individual interrupt causes */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
					FM10K_EIMR_ENABLE(FUM_FAULT) |
					FM10K_EIMR_ENABLE(MAILBOX) |
					FM10K_EIMR_ENABLE(SWITCHREADY) |
					FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
					FM10K_EIMR_ENABLE(SRAMERROR) |
					FM10K_EIMR_ENABLE(VFLR) |
					FM10K_EIMR_ENABLE(MAXHOLDTIME));

	/* enable interrupt */
	fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE);

	return 0;
}
1352
1353int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
1354{
1355 struct fm10k_hw *hw = &interface->hw;
1356 int err;
1357
1358 /* enable Mailbox cause */
5cb8db4a
AD
1359 if (hw->mac.type == fm10k_mac_pf)
1360 err = fm10k_mbx_request_irq_pf(interface);
1361 else
1362 err = fm10k_mbx_request_irq_vf(interface);
18283cad
AD
1363
1364 /* connect mailbox */
1365 if (!err)
1366 err = hw->mbx.ops.connect(hw, &hw->mbx);
1367
1368 return err;
1369}
1370
/**
 * fm10k_qv_free_irq - release interrupts associated with queue vectors
 * @interface: board private structure
 *
 * Release all interrupts associated with this interface
 **/
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
	int vector = interface->num_q_vectors;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;

	/* start one past the last queue-vector entry and walk backwards */
	entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];

	while (vector) {
		struct fm10k_q_vector *q_vector;

		vector--;
		entry--;
		q_vector = interface->q_vector[vector];

		/* vectors with no rings never had an IRQ requested
		 *
		 * NOTE(review): entry is decremented even for skipped
		 * vectors; this matches fm10k_qv_request_irq only if every
		 * q_vector has rings — confirm no ring-less vectors exist
		 */
		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* disable interrupts */

		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}
}
1402
/**
 * fm10k_qv_request_irq - initialize interrupts for queue vectors
 * @interface: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 *
 * Returns 0 on success; on failure, frees every IRQ already requested
 * and returns the negative error code.
 **/
int fm10k_qv_request_irq(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	struct msix_entry *entry;
	int ri = 0, ti = 0;
	int vector, err;

	/* queue vectors start after the non-queue (mailbox) vectors */
	entry = &interface->msix_entries[NON_Q_VECTORS(hw)];

	for (vector = 0; vector < interface->num_q_vectors; vector++) {
		struct fm10k_q_vector *q_vector = interface->q_vector[vector];

		/* name the vector after the rings it services */
		if (q_vector->tx.count && q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-TxRx-%d", dev->name, ri++);
			ti++;
		} else if (q_vector->rx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-rx-%d", dev->name, ri++);
		} else if (q_vector->tx.count) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-tx-%d", dev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}

		/* Assign ITR register to q_vector */
		q_vector->itr = (hw->mac.type == fm10k_mac_pf) ?
				&interface->uc_addr[FM10K_ITR(entry->entry)] :
				&interface->uc_addr[FM10K_VFITR(entry->entry)];

		/* request the IRQ */
		err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			netif_err(interface, probe, dev,
				  "request_irq failed for MSIX interrupt Error: %d\n",
				  err);
			goto err_out;
		}

		/* Enable q_vector */
		writel(FM10K_ITR_ENABLE, q_vector->itr);

		entry++;
	}

	return 0;

err_out:
	/* wind through the ring freeing all entries and vectors
	 *
	 * NOTE(review): the unwind decrements entry even for ring-less
	 * vectors, while the forward loop did not increment it for them;
	 * correct only if every q_vector has at least one ring — confirm
	 */
	while (vector) {
		struct fm10k_q_vector *q_vector;

		entry--;
		vector--;
		q_vector = interface->q_vector[vector];

		if (!q_vector->tx.count && !q_vector->rx.count)
			continue;

		/* disable interrupts */

		writel(FM10K_ITR_MASK_SET, q_vector->itr);

		free_irq(entry->vector, q_vector);
	}

	return err;
}
1483
504c5eac
AD
/**
 * fm10k_up - bring the interface fully up
 * @interface: board private structure
 *
 * Enables the DMA engines, configures the descriptor rings and interrupt
 * moderation, clears the down state, enables NAPI, restores Rx filters,
 * starts the transmit queues and kicks the service timer.
 **/
void fm10k_up(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* Enable Tx/Rx DMA */
	hw->mac.ops.start_hw(hw);

	/* configure Tx descriptor rings */
	fm10k_configure_tx(interface);

	/* configure Rx descriptor rings */
	fm10k_configure_rx(interface);

	/* configure interrupts */
	hw->mac.ops.update_int_moderator(hw);

	/* clear down bit to indicate we are ready to go */
	clear_bit(__FM10K_DOWN, &interface->state);

	/* enable polling cleanups */
	fm10k_napi_enable_all(interface);

	/* re-establish Rx filters */
	fm10k_restore_rx_state(interface);

	/* enable transmits */
	netif_tx_start_all_queues(interface->netdev);

	/* kick off the service timer, requesting a host-state check */
	hw->mac.get_host_state = 1;
	mod_timer(&interface->service_timer, jiffies);
}
1516
18283cad
AD
1517static void fm10k_napi_disable_all(struct fm10k_intfc *interface)
1518{
1519 struct fm10k_q_vector *q_vector;
1520 int q_idx;
1521
1522 for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) {
1523 q_vector = interface->q_vector[q_idx];
1524 napi_disable(&q_vector->napi);
1525 }
1526}
1527
504c5eac
AD
/**
 * fm10k_down - quiesce the interface
 * @interface: board private structure
 *
 * Signals down state, stops transmit paths, drops Rx filters, disables
 * NAPI, stops the service timer, captures final statistics and halts the
 * DMA engines.  Mirror of fm10k_up().
 **/
void fm10k_down(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	/* signal that we are down to the interrupt handler and service task */
	set_bit(__FM10K_DOWN, &interface->state);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);

	/* disable transmits */
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	/* reset Rx filters */
	fm10k_reset_rx_state(interface);

	/* allow 10ms for device to quiesce */
	usleep_range(10000, 20000);

	/* disable polling routines */
	fm10k_napi_disable_all(interface);

	/* stop the service task timer */
	del_timer_sync(&interface->service_timer);

	/* capture stats one last time before stopping interface */
	fm10k_update_stats(interface);

	/* Disable DMA engine for Tx/Rx */
	hw->mac.ops.stop_hw(hw);

	/* free any buffers still on the rings */
	fm10k_clean_all_tx_rings(interface);
}
1563
0e7b3644
AD
/**
 * fm10k_sw_init - Initialize general software structures
 * @interface: host interface private structure to initialize
 * @ent: PCI table entry whose driver_data selects the PF/VF info struct
 *
 * fm10k_sw_init initializes the interface private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int fm10k_sw_init(struct fm10k_intfc *interface,
			 const struct pci_device_id *ent)
{
	const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data];
	struct fm10k_hw *hw = &interface->hw;
	struct pci_dev *pdev = interface->pdev;
	struct net_device *netdev = interface->netdev;
	u32 rss_key[FM10K_RSSRK_SIZE];
	unsigned int rss;
	int err;

	/* initialize back pointer */
	hw->back = interface;
	hw->hw_addr = interface->uc_addr;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = fi->mac;

	/* Setup IOV handlers (PF only provides iov_ops) */
	if (fi->iov_ops)
		memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops));

	/* Set common capability flags and settings */
	rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus());
	interface->ring_feature[RING_F_RSS].limit = rss;
	fi->get_invariants(hw);

	/* pick up the PCIe bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	/* limit the usable DMA range */
	if (hw->mac.ops.set_dma_mask)
		hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev));

	/* update netdev with DMA restrictions */
	if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* delay any future reset requests */
	interface->last_reset = jiffies + (10 * HZ);

	/* reset and initialize the hardware so it is in a known state */
	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
	if (err) {
		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
		return err;
	}

	/* initialize hardware statistics */
	hw->mac.ops.update_hw_stats(hw, &interface->stats);

	/* Set upper limit on IOV VFs that can be allocated */
	pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs);

	/* Start with random Ethernet address */
	eth_random_addr(hw->mac.addr);

	/* Initialize MAC address from hardware */
	err = hw->mac.ops.read_mac_addr(hw);
	if (err) {
		dev_warn(&pdev->dev,
			 "Failed to obtain MAC address defaulting to random\n");
		/* tag address assignment as random */
		netdev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		return -EIO;
	}

	/* assign BAR 4 resources for use with PTP
	 *
	 * NOTE(review): ioremap failure leaves sw_addr NULL; users of
	 * sw_addr appear to tolerate NULL (e.g. the probe error path
	 * checks before iounmap) — confirm all consumers do
	 */
	if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
		interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
					     pci_resource_len(pdev, 4));
	hw->sw_addr = interface->sw_addr;

	/* Only the PF can support VXLAN and NVGRE offloads */
	if (hw->mac.type != fm10k_mac_pf) {
		netdev->hw_enc_features = 0;
		netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	}

	/* initialize DCBNL interface */
	fm10k_dcbnl_set_ops(netdev);

	/* Initialize service timer and service task; keep the task
	 * disabled until probe finishes
	 */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	setup_timer(&interface->service_timer, &fm10k_service_timer,
		    (unsigned long)interface);
	INIT_WORK(&interface->service_task, fm10k_service_task);

	/* Intitialize timestamp data */
	fm10k_ts_init(interface);

	/* set default ring sizes */
	interface->tx_ring_count = FM10K_DEFAULT_TXD;
	interface->rx_ring_count = FM10K_DEFAULT_RXD;

	/* set default interrupt moderation */
	interface->tx_itr = FM10K_ITR_10K;
	interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* initialize vxlan_port list */
	INIT_LIST_HEAD(&interface->vxlan_port);

	/* seed the RSS redirection key with random data */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	memcpy(interface->rssrk, rss_key, sizeof(rss_key));

	/* Start off interface as being down */
	set_bit(__FM10K_DOWN, &interface->state);

	return 0;
}
1701
1702static void fm10k_slot_warn(struct fm10k_intfc *interface)
1703{
1704 struct device *dev = &interface->pdev->dev;
1705 struct fm10k_hw *hw = &interface->hw;
1706
1707 if (hw->mac.ops.is_slot_appropriate(hw))
1708 return;
1709
1710 dev_warn(dev,
1711 "For optimal performance, a %s %s slot is recommended.\n",
1712 (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
1713 hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
1714 "x8"),
1715 (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1716 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1717 "8.0GT/s"));
1718 dev_warn(dev,
1719 "A slot with more lanes and/or higher speed is suggested.\n");
1720}
1721
b3890e30
AD
1722/**
1723 * fm10k_probe - Device Initialization Routine
1724 * @pdev: PCI device information struct
1725 * @ent: entry in fm10k_pci_tbl
1726 *
1727 * Returns 0 on success, negative on failure
1728 *
1729 * fm10k_probe initializes an interface identified by a pci_dev structure.
1730 * The OS initialization, configuring of the interface private structure,
1731 * and a hardware reset occur.
1732 **/
1733static int fm10k_probe(struct pci_dev *pdev,
1734 const struct pci_device_id *ent)
1735{
0e7b3644
AD
1736 struct net_device *netdev;
1737 struct fm10k_intfc *interface;
1738 struct fm10k_hw *hw;
b3890e30
AD
1739 int err;
1740 u64 dma_mask;
1741
1742 err = pci_enable_device_mem(pdev);
1743 if (err)
1744 return err;
1745
1746 /* By default fm10k only supports a 48 bit DMA mask */
1747 dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev);
1748
1749 if ((dma_mask <= DMA_BIT_MASK(32)) ||
1750 dma_set_mask_and_coherent(&pdev->dev, dma_mask)) {
1751 dma_mask &= DMA_BIT_MASK(32);
1752
1753 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1754 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1755 if (err) {
1756 err = dma_set_coherent_mask(&pdev->dev,
1757 DMA_BIT_MASK(32));
1758 if (err) {
1759 dev_err(&pdev->dev,
1760 "No usable DMA configuration, aborting\n");
1761 goto err_dma;
1762 }
1763 }
1764 }
1765
1766 err = pci_request_selected_regions(pdev,
1767 pci_select_bars(pdev,
1768 IORESOURCE_MEM),
1769 fm10k_driver_name);
1770 if (err) {
1771 dev_err(&pdev->dev,
1772 "pci_request_selected_regions failed 0x%x\n", err);
1773 goto err_pci_reg;
1774 }
1775
19ae1b3f
AD
1776 pci_enable_pcie_error_reporting(pdev);
1777
b3890e30
AD
1778 pci_set_master(pdev);
1779 pci_save_state(pdev);
1780
0e7b3644
AD
1781 netdev = fm10k_alloc_netdev();
1782 if (!netdev) {
1783 err = -ENOMEM;
1784 goto err_alloc_netdev;
1785 }
1786
1787 SET_NETDEV_DEV(netdev, &pdev->dev);
1788
1789 interface = netdev_priv(netdev);
1790 pci_set_drvdata(pdev, interface);
1791
1792 interface->netdev = netdev;
1793 interface->pdev = pdev;
1794 hw = &interface->hw;
1795
1796 interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
1797 FM10K_UC_ADDR_SIZE);
1798 if (!interface->uc_addr) {
1799 err = -EIO;
1800 goto err_ioremap;
1801 }
1802
1803 err = fm10k_sw_init(interface, ent);
1804 if (err)
1805 goto err_sw_init;
1806
7461fd91
AD
1807 /* enable debugfs support */
1808 fm10k_dbg_intfc_init(interface);
1809
18283cad
AD
1810 err = fm10k_init_queueing_scheme(interface);
1811 if (err)
1812 goto err_sw_init;
1813
1814 err = fm10k_mbx_request_irq(interface);
1815 if (err)
1816 goto err_mbx_interrupt;
1817
0e7b3644
AD
1818 /* final check of hardware state before registering the interface */
1819 err = fm10k_hw_ready(interface);
1820 if (err)
1821 goto err_register;
1822
1823 err = register_netdev(netdev);
1824 if (err)
1825 goto err_register;
1826
1827 /* carrier off reporting is important to ethtool even BEFORE open */
1828 netif_carrier_off(netdev);
1829
1830 /* stop all the transmit queues from transmitting until link is up */
1831 netif_tx_stop_all_queues(netdev);
1832
a211e013
AD
1833 /* Register PTP interface */
1834 fm10k_ptp_register(interface);
1835
0e7b3644
AD
1836 /* print bus type/speed/width info */
1837 dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
1838 (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
1839 hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
1840 hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
1841 "Unknown"),
1842 (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
1843 hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
1844 hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
1845 "Unknown"),
1846 (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
1847 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
1848 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
1849 "Unknown"));
1850
1851 /* print warning for non-optimal configurations */
1852 fm10k_slot_warn(interface);
1853
883a9ccb
AD
1854 /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
1855 fm10k_iov_configure(pdev, 0);
1856
b7d8514c
AD
1857 /* clear the service task disable bit to allow service task to start */
1858 clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
1859
b3890e30
AD
1860 return 0;
1861
0e7b3644 1862err_register:
18283cad
AD
1863 fm10k_mbx_free_irq(interface);
1864err_mbx_interrupt:
1865 fm10k_clear_queueing_scheme(interface);
0e7b3644 1866err_sw_init:
a211e013
AD
1867 if (interface->sw_addr)
1868 iounmap(interface->sw_addr);
0e7b3644
AD
1869 iounmap(interface->uc_addr);
1870err_ioremap:
1871 free_netdev(netdev);
1872err_alloc_netdev:
1873 pci_release_selected_regions(pdev,
1874 pci_select_bars(pdev, IORESOURCE_MEM));
b3890e30
AD
1875err_pci_reg:
1876err_dma:
1877 pci_disable_device(pdev);
1878 return err;
1879}
1880
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* stop the service task before tearing anything down */
	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
	cancel_work_sync(&interface->service_task);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	/* cleanup timestamp handling */
	fm10k_ptp_unregister(interface);

	/* release VFs */
	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	/* unmap BAR 4 (optional PTP space) and BAR 0 */
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
1930
19ae1b3f
AD
1931#ifdef CONFIG_PM
1932/**
1933 * fm10k_resume - Restore device to pre-sleep state
1934 * @pdev: PCI device information struct
1935 *
1936 * fm10k_resume is called after the system has powered back up from a sleep
1937 * state and is ready to resume operation. This function is meant to restore
1938 * the device back to its pre-sleep state.
1939 **/
1940static int fm10k_resume(struct pci_dev *pdev)
1941{
1942 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
1943 struct net_device *netdev = interface->netdev;
1944 struct fm10k_hw *hw = &interface->hw;
1945 u32 err;
1946
1947 pci_set_power_state(pdev, PCI_D0);
1948 pci_restore_state(pdev);
1949
1950 /* pci_restore_state clears dev->state_saved so call
1951 * pci_save_state to restore it.
1952 */
1953 pci_save_state(pdev);
1954
1955 err = pci_enable_device_mem(pdev);
1956 if (err) {
1957 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
1958 return err;
1959 }
1960 pci_set_master(pdev);
1961
1962 pci_wake_from_d3(pdev, false);
1963
1964 /* refresh hw_addr in case it was dropped */
1965 hw->hw_addr = interface->uc_addr;
1966
1967 /* reset hardware to known state */
1968 err = hw->mac.ops.init_hw(&interface->hw);
1969 if (err)
1970 return err;
1971
1972 /* reset statistics starting values */
1973 hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
1974
a211e013
AD
1975 /* reset clock */
1976 fm10k_ts_reset(interface);
1977
19ae1b3f
AD
1978 rtnl_lock();
1979
1980 err = fm10k_init_queueing_scheme(interface);
1981 if (!err) {
1982 fm10k_mbx_request_irq(interface);
1983 if (netif_running(netdev))
1984 err = fm10k_open(netdev);
1985 }
1986
1987 rtnl_unlock();
1988
1989 if (err)
1990 return err;
1991
883a9ccb
AD
1992 /* restore SR-IOV interface */
1993 fm10k_iov_resume(pdev);
1994
19ae1b3f
AD
1995 netif_device_attach(netdev);
1996
1997 return 0;
1998}
1999
/**
 * fm10k_suspend - Prepare the device for a system sleep state
 * @pdev: PCI device information struct
 * @state: requested sleep state (unused; device always enters D3hot)
 *
 * fm10k_suspend is meant to shutdown the device prior to the system entering
 * a sleep state. The fm10k hardware does not support wake on lan so the
 * driver simply needs to shut down the device so it is in a low power state.
 **/
static int fm10k_suspend(struct pci_dev *pdev,
			 pm_message_t __always_unused state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;
	int err = 0;

	netif_device_detach(netdev);

	/* quiesce SR-IOV before tearing down our own resources */
	fm10k_iov_suspend(pdev);

	rtnl_lock();

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	fm10k_clear_queueing_scheme(interface);

	rtnl_unlock();

	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
2040
2041#endif /* CONFIG_PM */
/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT if the device is unrecoverable,
 * otherwise PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* keep the stack off the device while recovery is attempted */
	netif_device_detach(netdev);

	/* permanent failure: nothing to clean up, device is gone */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		fm10k_close(netdev);

	fm10k_mbx_free_irq(interface);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2071
2072/**
2073 * fm10k_io_slot_reset - called after the pci bus has been reset.
2074 * @pdev: Pointer to PCI device
2075 *
2076 * Restart the card from scratch, as if from a cold-boot.
2077 */
2078static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
2079{
2080 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2081 pci_ers_result_t result;
2082
2083 if (pci_enable_device_mem(pdev)) {
2084 dev_err(&pdev->dev,
2085 "Cannot re-enable PCI device after reset.\n");
2086 result = PCI_ERS_RESULT_DISCONNECT;
2087 } else {
2088 pci_set_master(pdev);
2089 pci_restore_state(pdev);
2090
2091 /* After second error pci->state_saved is false, this
2092 * resets it so EEH doesn't break.
2093 */
2094 pci_save_state(pdev);
2095
2096 pci_wake_from_d3(pdev, false);
2097
2098 /* refresh hw_addr in case it was dropped */
2099 interface->hw.hw_addr = interface->uc_addr;
2100
2101 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
2102 fm10k_service_event_schedule(interface);
2103
2104 result = PCI_ERS_RESULT_RECOVERED;
2105 }
2106
2107 pci_cleanup_aer_uncorrect_error_status(pdev);
2108
2109 return result;
2110}
2111
2112/**
2113 * fm10k_io_resume - called when traffic can start flowing again.
2114 * @pdev: Pointer to PCI device
2115 *
2116 * This callback is called when the error recovery driver tells us that
2117 * its OK to resume normal operation.
2118 */
2119static void fm10k_io_resume(struct pci_dev *pdev)
2120{
2121 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
2122 struct net_device *netdev = interface->netdev;
2123 struct fm10k_hw *hw = &interface->hw;
2124 int err = 0;
2125
2126 /* reset hardware to known state */
2127 hw->mac.ops.init_hw(&interface->hw);
2128
2129 /* reset statistics starting values */
2130 hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
2131
2132 /* reassociate interrupts */
2133 fm10k_mbx_request_irq(interface);
2134
a211e013
AD
2135 /* reset clock */
2136 fm10k_ts_reset(interface);
2137
19ae1b3f
AD
2138 if (netif_running(netdev))
2139 err = fm10k_open(netdev);
2140
2141 /* final check of hardware state before registering the interface */
2142 err = err ? : fm10k_hw_ready(interface);
2143
2144 if (!err)
2145 netif_device_attach(netdev);
2146}
2147
/* callbacks invoked by the PCI AER/EEH core during error recovery */
static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
};
2153
b3890e30
AD
2154static struct pci_driver fm10k_driver = {
2155 .name = fm10k_driver_name,
2156 .id_table = fm10k_pci_tbl,
2157 .probe = fm10k_probe,
2158 .remove = fm10k_remove,
19ae1b3f
AD
2159#ifdef CONFIG_PM
2160 .suspend = fm10k_suspend,
2161 .resume = fm10k_resume,
2162#endif
883a9ccb 2163 .sriov_configure = fm10k_iov_configure,
19ae1b3f 2164 .err_handler = &fm10k_err_handler
b3890e30
AD
2165};
2166
/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 *
 * Returns 0 on success, negative errno from pci_register_driver() otherwise.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}
2176
/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}
This page took 0.150027 seconds and 5 git commands to generate.