i40e/i40evf: Bump driver version from 1.5.5 to 1.5.10
[deliverable/linux.git] drivers/net/ethernet/intel/i40e/i40e_main.c
1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30
31 /* Local includes */
32 #include "i40e.h"
33 #include "i40e_diag.h"
34 #if IS_ENABLED(CONFIG_VXLAN)
35 #include <net/vxlan.h>
36 #endif
37 #if IS_ENABLED(CONFIG_GENEVE)
38 #include <net/geneve.h>
39 #endif
40
41 const char i40e_driver_name[] = "i40e";
42 static const char i40e_driver_string[] =
43 "Intel(R) Ethernet Connection XL710 Network Driver";
44
45 #define DRV_KERN "-k"
46
47 #define DRV_VERSION_MAJOR 1
48 #define DRV_VERSION_MINOR 5
49 #define DRV_VERSION_BUILD 10
50 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
51 __stringify(DRV_VERSION_MINOR) "." \
52 __stringify(DRV_VERSION_BUILD) DRV_KERN
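/* Illustrative note (not part of the original source): with the values above,
 * DRV_VERSION expands to the string "1.5.10-k".
 */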
53 const char i40e_driver_version_str[] = DRV_VERSION;
54 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
55
56 /* a few forward declarations */
57 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
58 static void i40e_handle_reset_warning(struct i40e_pf *pf);
59 static int i40e_add_vsi(struct i40e_vsi *vsi);
60 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
61 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
62 static int i40e_setup_misc_vector(struct i40e_pf *pf);
63 static void i40e_determine_queue_usage(struct i40e_pf *pf);
64 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
65 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
66 u16 rss_table_size, u16 rss_size);
67 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
68 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
69
70 /* i40e_pci_tbl - PCI Device ID Table
71 *
72 * Last entry must be all 0s
73 *
74 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
75 * Class, Class Mask, private data (not used) }
76 */
77 static const struct pci_device_id i40e_pci_tbl[] = {
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
82 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
84 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
85 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
86 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
87 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
88 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
89 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
93 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
94 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
96 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
97 /* required last entry */
98 {0, }
99 };
100 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
101
102 #define I40E_MAX_VF_COUNT 128
103 static int debug = -1;
104 module_param(debug, int, 0);
105 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
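/* Illustrative example (not part of the original source): the module
 * parameter declared above can be set at load time, e.g.
 *
 *	modprobe i40e debug=3
 */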
106
107 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
108 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(DRV_VERSION);
111
112 static struct workqueue_struct *i40e_wq;
113
114 /**
115 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
116 * @hw: pointer to the HW structure
117 * @mem: ptr to mem struct to fill out
118 * @size: size of memory requested
119 * @alignment: what to align the allocation to
120 **/
121 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
122 u64 size, u32 alignment)
123 {
124 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
125
126 mem->size = ALIGN(size, alignment);
127 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
128 &mem->pa, GFP_KERNEL);
129 if (!mem->va)
130 return -ENOMEM;
131
132 return 0;
133 }
134
135 /**
136 * i40e_free_dma_mem_d - OS specific memory free for shared code
137 * @hw: pointer to the HW structure
138 * @mem: ptr to mem struct to free
139 **/
140 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
141 {
142 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
143
144 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
145 mem->va = NULL;
146 mem->pa = 0;
147 mem->size = 0;
148
149 return 0;
150 }
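/* Illustrative sketch (not part of the original source): the shared code is
 * expected to pair the two DMA helpers above, e.g.
 *
 *	struct i40e_dma_mem ring_mem;	// hypothetical name, example only
 *
 *	if (!i40e_allocate_dma_mem_d(hw, &ring_mem, 4096, 4096)) {
 *		... use ring_mem.va (CPU address) and ring_mem.pa (DMA address) ...
 *		i40e_free_dma_mem_d(hw, &ring_mem);
 *	}
 */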
151
152 /**
153 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
154 * @hw: pointer to the HW structure
155 * @mem: ptr to mem struct to fill out
156 * @size: size of memory requested
157 **/
158 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
159 u32 size)
160 {
161 mem->size = size;
162 mem->va = kzalloc(size, GFP_KERNEL);
163
164 if (!mem->va)
165 return -ENOMEM;
166
167 return 0;
168 }
169
170 /**
171 * i40e_free_virt_mem_d - OS specific memory free for shared code
172 * @hw: pointer to the HW structure
173 * @mem: ptr to mem struct to free
174 **/
175 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
176 {
177 /* it's ok to kfree a NULL pointer */
178 kfree(mem->va);
179 mem->va = NULL;
180 mem->size = 0;
181
182 return 0;
183 }
184
185 /**
186 * i40e_get_lump - find a lump of free generic resource
187 * @pf: board private structure
188 * @pile: the pile of resource to search
189 * @needed: the number of items needed
190 * @id: an owner id to stick on the items assigned
191 *
192 * Returns the base item index of the lump, or negative for error
193 *
194 * The search_hint trick and lack of advanced fit-finding only work
195 * because we're highly likely to have all the same size lump requests.
196 * Linear search time and any fragmentation should be minimal.
197 **/
198 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
199 u16 needed, u16 id)
200 {
201 int ret = -ENOMEM;
202 int i, j;
203
204 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
205 dev_info(&pf->pdev->dev,
206 "param err: pile=%p needed=%d id=0x%04x\n",
207 pile, needed, id);
208 return -EINVAL;
209 }
210
211 /* start the linear search with an imperfect hint */
212 i = pile->search_hint;
213 while (i < pile->num_entries) {
214 /* skip already allocated entries */
215 if (pile->list[i] & I40E_PILE_VALID_BIT) {
216 i++;
217 continue;
218 }
219
220 /* do we have enough in this lump? */
221 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
222 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
223 break;
224 }
225
226 if (j == needed) {
227 /* there was enough, so assign it to the requestor */
228 for (j = 0; j < needed; j++)
229 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
230 ret = i;
231 pile->search_hint = i + j;
232 break;
233 }
234
235 /* not enough, so skip over it and continue looking */
236 i += j;
237 }
238
239 return ret;
240 }
241
242 /**
243 * i40e_put_lump - return a lump of generic resource
244 * @pile: the pile of resource to search
245 * @index: the base item index
246 * @id: the owner id of the items assigned
247 *
248 * Returns the count of items in the lump
249 **/
250 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
251 {
252 int valid_id = (id | I40E_PILE_VALID_BIT);
253 int count = 0;
254 int i;
255
256 if (!pile || index >= pile->num_entries)
257 return -EINVAL;
258
259 for (i = index;
260 i < pile->num_entries && pile->list[i] == valid_id;
261 i++) {
262 pile->list[i] = 0;
263 count++;
264 }
265
266 if (count && index < pile->search_hint)
267 pile->search_hint = index;
268
269 return count;
270 }
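/* Illustrative example (not part of the original source): if the linear
 * search reaches four free entries at indexes 3..6, i40e_get_lump(pf, pile,
 * 4, id) returns 3 and tags pile->list[3..6] with (id | I40E_PILE_VALID_BIT);
 * a later i40e_put_lump(pile, 3, id) clears those four entries and returns 4.
 */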
271
272 /**
273 * i40e_find_vsi_from_id - searches for the vsi with the given id
274 * @pf: the pf structure to search for the vsi
275 * @id: id of the vsi it is searching for
276 **/
277 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
278 {
279 int i;
280
281 for (i = 0; i < pf->num_alloc_vsi; i++)
282 if (pf->vsi[i] && (pf->vsi[i]->id == id))
283 return pf->vsi[i];
284
285 return NULL;
286 }
287
288 /**
289 * i40e_service_event_schedule - Schedule the service task to wake up
290 * @pf: board private structure
291 *
292 * If not already scheduled, this puts the task into the work queue
293 **/
294 void i40e_service_event_schedule(struct i40e_pf *pf)
295 {
296 if (!test_bit(__I40E_DOWN, &pf->state) &&
297 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
298 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
299 queue_work(i40e_wq, &pf->service_task);
300 }
301
302 /**
303 * i40e_tx_timeout - Respond to a Tx Hang
304 * @netdev: network interface device structure
305 *
306 * If any port has noticed a Tx timeout, it is likely that the whole
307 * device is munged, not just the one netdev port, so go for the full
308 * reset.
309 **/
310 #ifdef I40E_FCOE
311 void i40e_tx_timeout(struct net_device *netdev)
312 #else
313 static void i40e_tx_timeout(struct net_device *netdev)
314 #endif
315 {
316 struct i40e_netdev_priv *np = netdev_priv(netdev);
317 struct i40e_vsi *vsi = np->vsi;
318 struct i40e_pf *pf = vsi->back;
319 struct i40e_ring *tx_ring = NULL;
320 unsigned int i, hung_queue = 0;
321 u32 head, val;
322
323 pf->tx_timeout_count++;
324
325 /* find the stopped queue the same way the stack does */
326 for (i = 0; i < netdev->num_tx_queues; i++) {
327 struct netdev_queue *q;
328 unsigned long trans_start;
329
330 q = netdev_get_tx_queue(netdev, i);
331 trans_start = q->trans_start ? : netdev->trans_start;
332 if (netif_xmit_stopped(q) &&
333 time_after(jiffies,
334 (trans_start + netdev->watchdog_timeo))) {
335 hung_queue = i;
336 break;
337 }
338 }
339
340 if (i == netdev->num_tx_queues) {
341 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
342 } else {
343 /* now that we have an index, find the tx_ring struct */
344 for (i = 0; i < vsi->num_queue_pairs; i++) {
345 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
346 if (hung_queue ==
347 vsi->tx_rings[i]->queue_index) {
348 tx_ring = vsi->tx_rings[i];
349 break;
350 }
351 }
352 }
353 }
354
355 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
356 pf->tx_timeout_recovery_level = 1; /* reset after some time */
357 else if (time_before(jiffies,
358 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
359 return; /* don't do any new action before the next timeout */
360
361 if (tx_ring) {
362 head = i40e_get_head(tx_ring);
363 /* Read interrupt register */
364 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
365 val = rd32(&pf->hw,
366 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
367 tx_ring->vsi->base_vector - 1));
368 else
369 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
370
371 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
372 vsi->seid, hung_queue, tx_ring->next_to_clean,
373 head, tx_ring->next_to_use,
374 readl(tx_ring->tail), val);
375 }
376
377 pf->tx_timeout_last_recovery = jiffies;
378 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
379 pf->tx_timeout_recovery_level, hung_queue);
380
381 switch (pf->tx_timeout_recovery_level) {
382 case 1:
383 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
384 break;
385 case 2:
386 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
387 break;
388 case 3:
389 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
390 break;
391 default:
392 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
393 break;
394 }
395
396 i40e_service_event_schedule(pf);
397 pf->tx_timeout_recovery_level++;
398 }
399
400 /**
401 * i40e_get_vsi_stats_struct - Get System Network Statistics
402 * @vsi: the VSI we care about
403 *
404 * Returns the address of the device statistics structure.
405 * The statistics are actually updated from the service task.
406 **/
407 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
408 {
409 return &vsi->net_stats;
410 }
411
412 /**
413 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
414 * @netdev: network interface device structure
415 *
416 * Returns the address of the device statistics structure.
417 * The statistics are actually updated from the service task.
418 **/
419 #ifdef I40E_FCOE
420 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
421 struct net_device *netdev,
422 struct rtnl_link_stats64 *stats)
423 #else
424 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
425 struct net_device *netdev,
426 struct rtnl_link_stats64 *stats)
427 #endif
428 {
429 struct i40e_netdev_priv *np = netdev_priv(netdev);
430 struct i40e_ring *tx_ring, *rx_ring;
431 struct i40e_vsi *vsi = np->vsi;
432 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
433 int i;
434
435 if (test_bit(__I40E_DOWN, &vsi->state))
436 return stats;
437
438 if (!vsi->tx_rings)
439 return stats;
440
441 rcu_read_lock();
442 for (i = 0; i < vsi->num_queue_pairs; i++) {
443 u64 bytes, packets;
444 unsigned int start;
445
446 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
447 if (!tx_ring)
448 continue;
449
450 do {
451 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
452 packets = tx_ring->stats.packets;
453 bytes = tx_ring->stats.bytes;
454 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
455
456 stats->tx_packets += packets;
457 stats->tx_bytes += bytes;
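/* Descriptive note (added, not in the original source): the Rx ring lives
 * immediately after its paired Tx ring in the same allocation, so
 * &tx_ring[1] below is that queue's Rx ring (see the similar access in
 * i40e_update_vsi_stats()).
 */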
458 rx_ring = &tx_ring[1];
459
460 do {
461 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
462 packets = rx_ring->stats.packets;
463 bytes = rx_ring->stats.bytes;
464 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
465
466 stats->rx_packets += packets;
467 stats->rx_bytes += bytes;
468 }
469 rcu_read_unlock();
470
471 /* following stats updated by i40e_watchdog_subtask() */
472 stats->multicast = vsi_stats->multicast;
473 stats->tx_errors = vsi_stats->tx_errors;
474 stats->tx_dropped = vsi_stats->tx_dropped;
475 stats->rx_errors = vsi_stats->rx_errors;
476 stats->rx_dropped = vsi_stats->rx_dropped;
477 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
478 stats->rx_length_errors = vsi_stats->rx_length_errors;
479
480 return stats;
481 }
482
483 /**
484 * i40e_vsi_reset_stats - Resets all stats of the given vsi
485 * @vsi: the VSI to have its stats reset
486 **/
487 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
488 {
489 struct rtnl_link_stats64 *ns;
490 int i;
491
492 if (!vsi)
493 return;
494
495 ns = i40e_get_vsi_stats_struct(vsi);
496 memset(ns, 0, sizeof(*ns));
497 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
498 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
499 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
500 if (vsi->rx_rings && vsi->rx_rings[0]) {
501 for (i = 0; i < vsi->num_queue_pairs; i++) {
502 memset(&vsi->rx_rings[i]->stats, 0,
503 sizeof(vsi->rx_rings[i]->stats));
504 memset(&vsi->rx_rings[i]->rx_stats, 0,
505 sizeof(vsi->rx_rings[i]->rx_stats));
506 memset(&vsi->tx_rings[i]->stats, 0,
507 sizeof(vsi->tx_rings[i]->stats));
508 memset(&vsi->tx_rings[i]->tx_stats, 0,
509 sizeof(vsi->tx_rings[i]->tx_stats));
510 }
511 }
512 vsi->stat_offsets_loaded = false;
513 }
514
515 /**
516 * i40e_pf_reset_stats - Reset all of the stats for the given PF
517 * @pf: the PF to be reset
518 **/
519 void i40e_pf_reset_stats(struct i40e_pf *pf)
520 {
521 int i;
522
523 memset(&pf->stats, 0, sizeof(pf->stats));
524 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
525 pf->stat_offsets_loaded = false;
526
527 for (i = 0; i < I40E_MAX_VEB; i++) {
528 if (pf->veb[i]) {
529 memset(&pf->veb[i]->stats, 0,
530 sizeof(pf->veb[i]->stats));
531 memset(&pf->veb[i]->stats_offsets, 0,
532 sizeof(pf->veb[i]->stats_offsets));
533 pf->veb[i]->stat_offsets_loaded = false;
534 }
535 }
536 }
537
538 /**
539 * i40e_stat_update48 - read and update a 48 bit stat from the chip
540 * @hw: ptr to the hardware info
541 * @hireg: the high 32 bit reg to read
542 * @loreg: the low 32 bit reg to read
543 * @offset_loaded: has the initial offset been loaded yet
544 * @offset: ptr to current offset value
545 * @stat: ptr to the stat
546 *
547 * Since the device stats are not reset at PFReset, they likely will not
548 * be zeroed when the driver starts. We'll save the first values read
549 * and use them as offsets to be subtracted from the raw values in order
550 * to report stats that count from zero. In the process, we also manage
551 * the potential roll-over.
552 **/
553 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
554 bool offset_loaded, u64 *offset, u64 *stat)
555 {
556 u64 new_data;
557
558 if (hw->device_id == I40E_DEV_ID_QEMU) {
559 new_data = rd32(hw, loreg);
560 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
561 } else {
562 new_data = rd64(hw, loreg);
563 }
564 if (!offset_loaded)
565 *offset = new_data;
566 if (likely(new_data >= *offset))
567 *stat = new_data - *offset;
568 else
569 *stat = (new_data + BIT_ULL(48)) - *offset;
570 *stat &= 0xFFFFFFFFFFFFULL;
571 }
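/* Illustrative arithmetic (not part of the original source): with the 48-bit
 * counter, if *offset == 0xFFFFFFFFFFF0 and the hardware counter wraps so
 * that new_data == 0x10, the else branch computes
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the
 * offset was captured.
 */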
572
573 /**
574 * i40e_stat_update32 - read and update a 32 bit stat from the chip
575 * @hw: ptr to the hardware info
576 * @reg: the hw reg to read
577 * @offset_loaded: has the initial offset been loaded yet
578 * @offset: ptr to current offset value
579 * @stat: ptr to the stat
580 **/
581 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
582 bool offset_loaded, u64 *offset, u64 *stat)
583 {
584 u32 new_data;
585
586 new_data = rd32(hw, reg);
587 if (!offset_loaded)
588 *offset = new_data;
589 if (likely(new_data >= *offset))
590 *stat = (u32)(new_data - *offset);
591 else
592 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
593 }
594
595 /**
596 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
597 * @vsi: the VSI to be updated
598 **/
599 void i40e_update_eth_stats(struct i40e_vsi *vsi)
600 {
601 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
602 struct i40e_pf *pf = vsi->back;
603 struct i40e_hw *hw = &pf->hw;
604 struct i40e_eth_stats *oes;
605 struct i40e_eth_stats *es; /* device's eth stats */
606
607 es = &vsi->eth_stats;
608 oes = &vsi->eth_stats_offsets;
609
610 /* Gather up the stats that the hw collects */
611 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
612 vsi->stat_offsets_loaded,
613 &oes->tx_errors, &es->tx_errors);
614 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
615 vsi->stat_offsets_loaded,
616 &oes->rx_discards, &es->rx_discards);
617 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
618 vsi->stat_offsets_loaded,
619 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
623
624 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
625 I40E_GLV_GORCL(stat_idx),
626 vsi->stat_offsets_loaded,
627 &oes->rx_bytes, &es->rx_bytes);
628 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
629 I40E_GLV_UPRCL(stat_idx),
630 vsi->stat_offsets_loaded,
631 &oes->rx_unicast, &es->rx_unicast);
632 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
633 I40E_GLV_MPRCL(stat_idx),
634 vsi->stat_offsets_loaded,
635 &oes->rx_multicast, &es->rx_multicast);
636 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
637 I40E_GLV_BPRCL(stat_idx),
638 vsi->stat_offsets_loaded,
639 &oes->rx_broadcast, &es->rx_broadcast);
640
641 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
642 I40E_GLV_GOTCL(stat_idx),
643 vsi->stat_offsets_loaded,
644 &oes->tx_bytes, &es->tx_bytes);
645 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
646 I40E_GLV_UPTCL(stat_idx),
647 vsi->stat_offsets_loaded,
648 &oes->tx_unicast, &es->tx_unicast);
649 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
650 I40E_GLV_MPTCL(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->tx_multicast, &es->tx_multicast);
653 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
654 I40E_GLV_BPTCL(stat_idx),
655 vsi->stat_offsets_loaded,
656 &oes->tx_broadcast, &es->tx_broadcast);
657 vsi->stat_offsets_loaded = true;
658 }
659
660 /**
661 * i40e_update_veb_stats - Update Switch component statistics
662 * @veb: the VEB being updated
663 **/
664 static void i40e_update_veb_stats(struct i40e_veb *veb)
665 {
666 struct i40e_pf *pf = veb->pf;
667 struct i40e_hw *hw = &pf->hw;
668 struct i40e_eth_stats *oes;
669 struct i40e_eth_stats *es; /* device's eth stats */
670 struct i40e_veb_tc_stats *veb_oes;
671 struct i40e_veb_tc_stats *veb_es;
672 int i, idx = 0;
673
674 idx = veb->stats_idx;
675 es = &veb->stats;
676 oes = &veb->stats_offsets;
677 veb_es = &veb->tc_stats;
678 veb_oes = &veb->tc_stats_offsets;
679
680 /* Gather up the stats that the hw collects */
681 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
682 veb->stat_offsets_loaded,
683 &oes->tx_discards, &es->tx_discards);
684 if (hw->revision_id > 0)
685 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
686 veb->stat_offsets_loaded,
687 &oes->rx_unknown_protocol,
688 &es->rx_unknown_protocol);
689 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
690 veb->stat_offsets_loaded,
691 &oes->rx_bytes, &es->rx_bytes);
692 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
693 veb->stat_offsets_loaded,
694 &oes->rx_unicast, &es->rx_unicast);
695 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
696 veb->stat_offsets_loaded,
697 &oes->rx_multicast, &es->rx_multicast);
698 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
699 veb->stat_offsets_loaded,
700 &oes->rx_broadcast, &es->rx_broadcast);
701
702 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
703 veb->stat_offsets_loaded,
704 &oes->tx_bytes, &es->tx_bytes);
705 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
706 veb->stat_offsets_loaded,
707 &oes->tx_unicast, &es->tx_unicast);
708 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->tx_multicast, &es->tx_multicast);
711 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
712 veb->stat_offsets_loaded,
713 &oes->tx_broadcast, &es->tx_broadcast);
714 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
715 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
716 I40E_GLVEBTC_RPCL(i, idx),
717 veb->stat_offsets_loaded,
718 &veb_oes->tc_rx_packets[i],
719 &veb_es->tc_rx_packets[i]);
720 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
721 I40E_GLVEBTC_RBCL(i, idx),
722 veb->stat_offsets_loaded,
723 &veb_oes->tc_rx_bytes[i],
724 &veb_es->tc_rx_bytes[i]);
725 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
726 I40E_GLVEBTC_TPCL(i, idx),
727 veb->stat_offsets_loaded,
728 &veb_oes->tc_tx_packets[i],
729 &veb_es->tc_tx_packets[i]);
730 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
731 I40E_GLVEBTC_TBCL(i, idx),
732 veb->stat_offsets_loaded,
733 &veb_oes->tc_tx_bytes[i],
734 &veb_es->tc_tx_bytes[i]);
735 }
736 veb->stat_offsets_loaded = true;
737 }
738
739 #ifdef I40E_FCOE
740 /**
741 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
742 * @vsi: the VSI that is capable of doing FCoE
743 **/
744 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
745 {
746 struct i40e_pf *pf = vsi->back;
747 struct i40e_hw *hw = &pf->hw;
748 struct i40e_fcoe_stats *ofs;
749 struct i40e_fcoe_stats *fs; /* device's FCoE stats */
750 int idx;
751
752 if (vsi->type != I40E_VSI_FCOE)
753 return;
754
755 idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
756 fs = &vsi->fcoe_stats;
757 ofs = &vsi->fcoe_stats_offsets;
758
759 i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
760 vsi->fcoe_stat_offsets_loaded,
761 &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
762 i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
763 vsi->fcoe_stat_offsets_loaded,
764 &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
765 i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
766 vsi->fcoe_stat_offsets_loaded,
767 &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
768 i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
769 vsi->fcoe_stat_offsets_loaded,
770 &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
771 i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
772 vsi->fcoe_stat_offsets_loaded,
773 &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
774 i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
775 vsi->fcoe_stat_offsets_loaded,
776 &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
777 i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
778 vsi->fcoe_stat_offsets_loaded,
779 &ofs->fcoe_last_error, &fs->fcoe_last_error);
780 i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
781 vsi->fcoe_stat_offsets_loaded,
782 &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
783
784 vsi->fcoe_stat_offsets_loaded = true;
785 }
786
787 #endif
788 /**
789 * i40e_update_vsi_stats - Update the vsi statistics counters.
790 * @vsi: the VSI to be updated
791 *
792 * There are a few instances where we store the same stat in a
793 * couple of different structs. This is partly because we have
794 * the netdev stats that need to be filled out, which is slightly
795 * different from the "eth_stats" defined by the chip and used in
796 * VF communications. We sort it out here.
797 **/
798 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
799 {
800 struct i40e_pf *pf = vsi->back;
801 struct rtnl_link_stats64 *ons;
802 struct rtnl_link_stats64 *ns; /* netdev stats */
803 struct i40e_eth_stats *oes;
804 struct i40e_eth_stats *es; /* device's eth stats */
805 u32 tx_restart, tx_busy;
806 u64 tx_lost_interrupt;
807 struct i40e_ring *p;
808 u32 rx_page, rx_buf;
809 u64 bytes, packets;
810 unsigned int start;
811 u64 tx_linearize;
812 u64 tx_force_wb;
813 u64 rx_p, rx_b;
814 u64 tx_p, tx_b;
815 u16 q;
816
817 if (test_bit(__I40E_DOWN, &vsi->state) ||
818 test_bit(__I40E_CONFIG_BUSY, &pf->state))
819 return;
820
821 ns = i40e_get_vsi_stats_struct(vsi);
822 ons = &vsi->net_stats_offsets;
823 es = &vsi->eth_stats;
824 oes = &vsi->eth_stats_offsets;
825
826 /* Gather up the netdev and vsi stats that the driver collects
827 * on the fly during packet processing
828 */
829 rx_b = rx_p = 0;
830 tx_b = tx_p = 0;
831 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
832 tx_lost_interrupt = 0;
833 rx_page = 0;
834 rx_buf = 0;
835 rcu_read_lock();
836 for (q = 0; q < vsi->num_queue_pairs; q++) {
837 /* locate Tx ring */
838 p = ACCESS_ONCE(vsi->tx_rings[q]);
839
840 do {
841 start = u64_stats_fetch_begin_irq(&p->syncp);
842 packets = p->stats.packets;
843 bytes = p->stats.bytes;
844 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
845 tx_b += bytes;
846 tx_p += packets;
847 tx_restart += p->tx_stats.restart_queue;
848 tx_busy += p->tx_stats.tx_busy;
849 tx_linearize += p->tx_stats.tx_linearize;
850 tx_force_wb += p->tx_stats.tx_force_wb;
851 tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
852
853 /* Rx queue is part of the same block as Tx queue */
854 p = &p[1];
855 do {
856 start = u64_stats_fetch_begin_irq(&p->syncp);
857 packets = p->stats.packets;
858 bytes = p->stats.bytes;
859 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
860 rx_b += bytes;
861 rx_p += packets;
862 rx_buf += p->rx_stats.alloc_buff_failed;
863 rx_page += p->rx_stats.alloc_page_failed;
864 }
865 rcu_read_unlock();
866 vsi->tx_restart = tx_restart;
867 vsi->tx_busy = tx_busy;
868 vsi->tx_linearize = tx_linearize;
869 vsi->tx_force_wb = tx_force_wb;
870 vsi->tx_lost_interrupt = tx_lost_interrupt;
871 vsi->rx_page_failed = rx_page;
872 vsi->rx_buf_failed = rx_buf;
873
874 ns->rx_packets = rx_p;
875 ns->rx_bytes = rx_b;
876 ns->tx_packets = tx_p;
877 ns->tx_bytes = tx_b;
878
879 /* update netdev stats from eth stats */
880 i40e_update_eth_stats(vsi);
881 ons->tx_errors = oes->tx_errors;
882 ns->tx_errors = es->tx_errors;
883 ons->multicast = oes->rx_multicast;
884 ns->multicast = es->rx_multicast;
885 ons->rx_dropped = oes->rx_discards;
886 ns->rx_dropped = es->rx_discards;
887 ons->tx_dropped = oes->tx_discards;
888 ns->tx_dropped = es->tx_discards;
889
890 /* pull in a couple PF stats if this is the main vsi */
891 if (vsi == pf->vsi[pf->lan_vsi]) {
892 ns->rx_crc_errors = pf->stats.crc_errors;
893 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
894 ns->rx_length_errors = pf->stats.rx_length_errors;
895 }
896 }
897
898 /**
899 * i40e_update_pf_stats - Update the PF statistics counters.
900 * @pf: the PF to be updated
901 **/
902 static void i40e_update_pf_stats(struct i40e_pf *pf)
903 {
904 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
905 struct i40e_hw_port_stats *nsd = &pf->stats;
906 struct i40e_hw *hw = &pf->hw;
907 u32 val;
908 int i;
909
910 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
911 I40E_GLPRT_GORCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
914 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
915 I40E_GLPRT_GOTCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
918 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
919 pf->stat_offsets_loaded,
920 &osd->eth.rx_discards,
921 &nsd->eth.rx_discards);
922 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
923 I40E_GLPRT_UPRCL(hw->port),
924 pf->stat_offsets_loaded,
925 &osd->eth.rx_unicast,
926 &nsd->eth.rx_unicast);
927 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
928 I40E_GLPRT_MPRCL(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->eth.rx_multicast,
931 &nsd->eth.rx_multicast);
932 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
933 I40E_GLPRT_BPRCL(hw->port),
934 pf->stat_offsets_loaded,
935 &osd->eth.rx_broadcast,
936 &nsd->eth.rx_broadcast);
937 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
938 I40E_GLPRT_UPTCL(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->eth.tx_unicast,
941 &nsd->eth.tx_unicast);
942 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
943 I40E_GLPRT_MPTCL(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->eth.tx_multicast,
946 &nsd->eth.tx_multicast);
947 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
948 I40E_GLPRT_BPTCL(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->eth.tx_broadcast,
951 &nsd->eth.tx_broadcast);
952
953 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->tx_dropped_link_down,
956 &nsd->tx_dropped_link_down);
957
958 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->crc_errors, &nsd->crc_errors);
961
962 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
963 pf->stat_offsets_loaded,
964 &osd->illegal_bytes, &nsd->illegal_bytes);
965
966 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
967 pf->stat_offsets_loaded,
968 &osd->mac_local_faults,
969 &nsd->mac_local_faults);
970 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
971 pf->stat_offsets_loaded,
972 &osd->mac_remote_faults,
973 &nsd->mac_remote_faults);
974
975 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->rx_length_errors,
978 &nsd->rx_length_errors);
979
980 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
981 pf->stat_offsets_loaded,
982 &osd->link_xon_rx, &nsd->link_xon_rx);
983 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
984 pf->stat_offsets_loaded,
985 &osd->link_xon_tx, &nsd->link_xon_tx);
986 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
987 pf->stat_offsets_loaded,
988 &osd->link_xoff_rx, &nsd->link_xoff_rx);
989 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
990 pf->stat_offsets_loaded,
991 &osd->link_xoff_tx, &nsd->link_xoff_tx);
992
993 for (i = 0; i < 8; i++) {
994 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
995 pf->stat_offsets_loaded,
996 &osd->priority_xoff_rx[i],
997 &nsd->priority_xoff_rx[i]);
998 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
999 pf->stat_offsets_loaded,
1000 &osd->priority_xon_rx[i],
1001 &nsd->priority_xon_rx[i]);
1002 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1003 pf->stat_offsets_loaded,
1004 &osd->priority_xon_tx[i],
1005 &nsd->priority_xon_tx[i]);
1006 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1007 pf->stat_offsets_loaded,
1008 &osd->priority_xoff_tx[i],
1009 &nsd->priority_xoff_tx[i]);
1010 i40e_stat_update32(hw,
1011 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1012 pf->stat_offsets_loaded,
1013 &osd->priority_xon_2_xoff[i],
1014 &nsd->priority_xon_2_xoff[i]);
1015 }
1016
1017 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1018 I40E_GLPRT_PRC64L(hw->port),
1019 pf->stat_offsets_loaded,
1020 &osd->rx_size_64, &nsd->rx_size_64);
1021 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1022 I40E_GLPRT_PRC127L(hw->port),
1023 pf->stat_offsets_loaded,
1024 &osd->rx_size_127, &nsd->rx_size_127);
1025 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1026 I40E_GLPRT_PRC255L(hw->port),
1027 pf->stat_offsets_loaded,
1028 &osd->rx_size_255, &nsd->rx_size_255);
1029 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1030 I40E_GLPRT_PRC511L(hw->port),
1031 pf->stat_offsets_loaded,
1032 &osd->rx_size_511, &nsd->rx_size_511);
1033 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1034 I40E_GLPRT_PRC1023L(hw->port),
1035 pf->stat_offsets_loaded,
1036 &osd->rx_size_1023, &nsd->rx_size_1023);
1037 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1038 I40E_GLPRT_PRC1522L(hw->port),
1039 pf->stat_offsets_loaded,
1040 &osd->rx_size_1522, &nsd->rx_size_1522);
1041 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1042 I40E_GLPRT_PRC9522L(hw->port),
1043 pf->stat_offsets_loaded,
1044 &osd->rx_size_big, &nsd->rx_size_big);
1045
1046 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1047 I40E_GLPRT_PTC64L(hw->port),
1048 pf->stat_offsets_loaded,
1049 &osd->tx_size_64, &nsd->tx_size_64);
1050 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1051 I40E_GLPRT_PTC127L(hw->port),
1052 pf->stat_offsets_loaded,
1053 &osd->tx_size_127, &nsd->tx_size_127);
1054 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1055 I40E_GLPRT_PTC255L(hw->port),
1056 pf->stat_offsets_loaded,
1057 &osd->tx_size_255, &nsd->tx_size_255);
1058 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1059 I40E_GLPRT_PTC511L(hw->port),
1060 pf->stat_offsets_loaded,
1061 &osd->tx_size_511, &nsd->tx_size_511);
1062 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1063 I40E_GLPRT_PTC1023L(hw->port),
1064 pf->stat_offsets_loaded,
1065 &osd->tx_size_1023, &nsd->tx_size_1023);
1066 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1067 I40E_GLPRT_PTC1522L(hw->port),
1068 pf->stat_offsets_loaded,
1069 &osd->tx_size_1522, &nsd->tx_size_1522);
1070 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1071 I40E_GLPRT_PTC9522L(hw->port),
1072 pf->stat_offsets_loaded,
1073 &osd->tx_size_big, &nsd->tx_size_big);
1074
1075 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1076 pf->stat_offsets_loaded,
1077 &osd->rx_undersize, &nsd->rx_undersize);
1078 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1079 pf->stat_offsets_loaded,
1080 &osd->rx_fragments, &nsd->rx_fragments);
1081 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1082 pf->stat_offsets_loaded,
1083 &osd->rx_oversize, &nsd->rx_oversize);
1084 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1085 pf->stat_offsets_loaded,
1086 &osd->rx_jabber, &nsd->rx_jabber);
1087
1088 /* FDIR stats */
1089 i40e_stat_update32(hw,
1090 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1091 pf->stat_offsets_loaded,
1092 &osd->fd_atr_match, &nsd->fd_atr_match);
1093 i40e_stat_update32(hw,
1094 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1095 pf->stat_offsets_loaded,
1096 &osd->fd_sb_match, &nsd->fd_sb_match);
1097 i40e_stat_update32(hw,
1098 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1099 pf->stat_offsets_loaded,
1100 &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1101
1102 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1103 nsd->tx_lpi_status =
1104 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1105 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1106 nsd->rx_lpi_status =
1107 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1108 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1109 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1110 pf->stat_offsets_loaded,
1111 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1112 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1113 pf->stat_offsets_loaded,
1114 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1115
1116 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1117 !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1118 nsd->fd_sb_status = true;
1119 else
1120 nsd->fd_sb_status = false;
1121
1122 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1123 !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1124 nsd->fd_atr_status = true;
1125 else
1126 nsd->fd_atr_status = false;
1127
1128 pf->stat_offsets_loaded = true;
1129 }
1130
1131 /**
1132 * i40e_update_stats - Update the various statistics counters.
1133 * @vsi: the VSI to be updated
1134 *
1135 * Update the various stats for this VSI and its related entities.
1136 **/
1137 void i40e_update_stats(struct i40e_vsi *vsi)
1138 {
1139 struct i40e_pf *pf = vsi->back;
1140
1141 if (vsi == pf->vsi[pf->lan_vsi])
1142 i40e_update_pf_stats(pf);
1143
1144 i40e_update_vsi_stats(vsi);
1145 #ifdef I40E_FCOE
1146 i40e_update_fcoe_stats(vsi);
1147 #endif
1148 }
1149
1150 /**
1151 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1152 * @vsi: the VSI to be searched
1153 * @macaddr: the MAC address
1154 * @vlan: the vlan
1155 * @is_vf: make sure it's a VF filter, else doesn't matter
1156 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1157 *
1158 * Returns ptr to the filter object or NULL
1159 **/
1160 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1161 u8 *macaddr, s16 vlan,
1162 bool is_vf, bool is_netdev)
1163 {
1164 struct i40e_mac_filter *f;
1165
1166 if (!vsi || !macaddr)
1167 return NULL;
1168
1169 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1170 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1171 (vlan == f->vlan) &&
1172 (!is_vf || f->is_vf) &&
1173 (!is_netdev || f->is_netdev))
1174 return f;
1175 }
1176 return NULL;
1177 }
1178
1179 /**
1180 * i40e_find_mac - Find a mac addr in the macvlan filters list
1181 * @vsi: the VSI to be searched
1182 * @macaddr: the MAC address we are searching for
1183 * @is_vf: make sure it's a VF filter, else doesn't matter
1184 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1185 *
1186 * Returns the first filter with the provided MAC address or NULL if
1187 * MAC address was not found
1188 **/
1189 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1190 bool is_vf, bool is_netdev)
1191 {
1192 struct i40e_mac_filter *f;
1193
1194 if (!vsi || !macaddr)
1195 return NULL;
1196
1197 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1198 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1199 (!is_vf || f->is_vf) &&
1200 (!is_netdev || f->is_netdev))
1201 return f;
1202 }
1203 return NULL;
1204 }
1205
1206 /**
1207 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1208 * @vsi: the VSI to be searched
1209 *
1210 * Returns true if VSI is in vlan mode or false otherwise
1211 **/
1212 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1213 {
1214 struct i40e_mac_filter *f;
1215
1216 /* Only -1 for all the filters denotes not being in vlan mode,
1217 * so we have to go through the whole list to make sure
1218 */
1219 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1220 if (f->vlan >= 0 || vsi->info.pvid)
1221 return true;
1222 }
1223
1224 return false;
1225 }
1226
1227 /**
1228 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1229 * @vsi: the VSI to be searched
1230 * @macaddr: the mac address to be filtered
1231 * @is_vf: true if it is a VF
1232 * @is_netdev: true if it is a netdev
1233 *
1234 * Goes through all the macvlan filters and adds a
1235 * macvlan filter for each unique vlan that already exists
1236 *
1237 * Returns first filter found on success, else NULL
1238 **/
1239 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1240 bool is_vf, bool is_netdev)
1241 {
1242 struct i40e_mac_filter *f;
1243
1244 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1245 if (vsi->info.pvid)
1246 f->vlan = le16_to_cpu(vsi->info.pvid);
1247 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1248 is_vf, is_netdev)) {
1249 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1250 is_vf, is_netdev))
1251 return NULL;
1252 }
1253 }
1254
1255 return list_first_entry_or_null(&vsi->mac_filter_list,
1256 struct i40e_mac_filter, list);
1257 }
1258
1259 /**
1260 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1261 * @vsi: the VSI to be searched
1262 * @macaddr: the mac address to be removed
1263 * @is_vf: true if it is a VF
1264 * @is_netdev: true if it is a netdev
1265 *
1266 * Removes a given MAC address from a VSI, regardless of VLAN
1267 *
1268 * Returns 0 for success, or error
1269 **/
1270 int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1271 bool is_vf, bool is_netdev)
1272 {
1273 struct i40e_mac_filter *f = NULL;
1274 int changed = 0;
1275
1276 WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1277 "Missing mac_filter_list_lock\n");
1278 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1279 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1280 (is_vf == f->is_vf) &&
1281 (is_netdev == f->is_netdev)) {
1282 f->counter--;
1283 f->changed = true;
1284 changed = 1;
1285 }
1286 }
1287 if (changed) {
1288 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1289 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1290 return 0;
1291 }
1292 return -ENOENT;
1293 }
1294
1295 /**
1296 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1297 * @vsi: the PF Main VSI - inappropriate for any other VSI
1298 * @macaddr: the MAC address
1299 *
1300 * Some older firmware configurations set up a default promiscuous VLAN
1301 * filter that needs to be removed.
1302 **/
1303 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1304 {
1305 struct i40e_aqc_remove_macvlan_element_data element;
1306 struct i40e_pf *pf = vsi->back;
1307 i40e_status ret;
1308
1309 /* Only appropriate for the PF main VSI */
1310 if (vsi->type != I40E_VSI_MAIN)
1311 return -EINVAL;
1312
1313 memset(&element, 0, sizeof(element));
1314 ether_addr_copy(element.mac_addr, macaddr);
1315 element.vlan_tag = 0;
1316 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1317 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1318 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1319 if (ret)
1320 return -ENOENT;
1321
1322 return 0;
1323 }
1324
1325 /**
1326 * i40e_add_filter - Add a mac/vlan filter to the VSI
1327 * @vsi: the VSI to be searched
1328 * @macaddr: the MAC address
1329 * @vlan: the vlan
1330 * @is_vf: make sure it's a VF filter, else doesn't matter
1331 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1332 *
1333 * Returns ptr to the filter object or NULL when no memory available.
1334 *
1335 * NOTE: This function is expected to be called with mac_filter_list_lock
1336 * being held.
1337 **/
1338 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1339 u8 *macaddr, s16 vlan,
1340 bool is_vf, bool is_netdev)
1341 {
1342 struct i40e_mac_filter *f;
1343
1344 if (!vsi || !macaddr)
1345 return NULL;
1346
1347 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1348 if (!f) {
1349 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1350 if (!f)
1351 goto add_filter_out;
1352
1353 ether_addr_copy(f->macaddr, macaddr);
1354 f->vlan = vlan;
1355 f->changed = true;
1356
1357 INIT_LIST_HEAD(&f->list);
1358 list_add_tail(&f->list, &vsi->mac_filter_list);
1359 }
1360
1361 /* increment counter and add a new flag if needed */
1362 if (is_vf) {
1363 if (!f->is_vf) {
1364 f->is_vf = true;
1365 f->counter++;
1366 }
1367 } else if (is_netdev) {
1368 if (!f->is_netdev) {
1369 f->is_netdev = true;
1370 f->counter++;
1371 }
1372 } else {
1373 f->counter++;
1374 }
1375
1376 /* changed tells sync_filters_subtask to
1377 * push the filter down to the firmware
1378 */
1379 if (f->changed) {
1380 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1381 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1382 }
1383
1384 add_filter_out:
1385 return f;
1386 }
1387
1388 /**
1389 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1390 * @vsi: the VSI to be searched
1391 * @macaddr: the MAC address
1392 * @vlan: the vlan
1393 * @is_vf: make sure it's a VF filter, else doesn't matter
1394 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1395 *
1396 * NOTE: This function is expected to be called with mac_filter_list_lock
1397 * being held.
1398 **/
1399 void i40e_del_filter(struct i40e_vsi *vsi,
1400 u8 *macaddr, s16 vlan,
1401 bool is_vf, bool is_netdev)
1402 {
1403 struct i40e_mac_filter *f;
1404
1405 if (!vsi || !macaddr)
1406 return;
1407
1408 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1409 if (!f || f->counter == 0)
1410 return;
1411
1412 if (is_vf) {
1413 if (f->is_vf) {
1414 f->is_vf = false;
1415 f->counter--;
1416 }
1417 } else if (is_netdev) {
1418 if (f->is_netdev) {
1419 f->is_netdev = false;
1420 f->counter--;
1421 }
1422 } else {
1423 /* make sure we don't remove a filter in use by VF or netdev */
1424 int min_f = 0;
1425
1426 min_f += (f->is_vf ? 1 : 0);
1427 min_f += (f->is_netdev ? 1 : 0);
1428
1429 if (f->counter > min_f)
1430 f->counter--;
1431 }
1432
1433 /* counter == 0 tells sync_filters_subtask to
1434 * remove the filter from the firmware's list
1435 */
1436 if (f->counter == 0) {
1437 f->changed = true;
1438 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1439 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1440 }
1441 }
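/* Descriptive note (added, not in the original source): i40e_add_filter() and
 * i40e_del_filter() reference-count each MAC/VLAN filter entry (with separate
 * is_vf/is_netdev ownership flags); a filter is only flagged for removal from
 * the firmware once its counter drops back to zero, as handled by the
 * sync_filters_subtask path.
 */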
1442
1443 /**
1444 * i40e_set_mac - NDO callback to set mac address
1445 * @netdev: network interface device structure
1446 * @p: pointer to an address structure
1447 *
1448 * Returns 0 on success, negative on failure
1449 **/
1450 #ifdef I40E_FCOE
1451 int i40e_set_mac(struct net_device *netdev, void *p)
1452 #else
1453 static int i40e_set_mac(struct net_device *netdev, void *p)
1454 #endif
1455 {
1456 struct i40e_netdev_priv *np = netdev_priv(netdev);
1457 struct i40e_vsi *vsi = np->vsi;
1458 struct i40e_pf *pf = vsi->back;
1459 struct i40e_hw *hw = &pf->hw;
1460 struct sockaddr *addr = p;
1461 struct i40e_mac_filter *f;
1462
1463 if (!is_valid_ether_addr(addr->sa_data))
1464 return -EADDRNOTAVAIL;
1465
1466 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1467 netdev_info(netdev, "already using mac address %pM\n",
1468 addr->sa_data);
1469 return 0;
1470 }
1471
1472 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1473 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1474 return -EADDRNOTAVAIL;
1475
1476 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1477 netdev_info(netdev, "returning to hw mac address %pM\n",
1478 hw->mac.addr);
1479 else
1480 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1481
1482 if (vsi->type == I40E_VSI_MAIN) {
1483 i40e_status ret;
1484
1485 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1486 I40E_AQC_WRITE_TYPE_LAA_WOL,
1487 addr->sa_data, NULL);
1488 if (ret) {
1489 netdev_info(netdev,
1490 "Addr change for Main VSI failed: %d\n",
1491 ret);
1492 return -EADDRNOTAVAIL;
1493 }
1494 }
1495
1496 if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1497 struct i40e_aqc_remove_macvlan_element_data element;
1498
1499 memset(&element, 0, sizeof(element));
1500 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1501 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1502 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1503 } else {
1504 spin_lock_bh(&vsi->mac_filter_list_lock);
1505 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1506 false, false);
1507 spin_unlock_bh(&vsi->mac_filter_list_lock);
1508 }
1509
1510 if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1511 struct i40e_aqc_add_macvlan_element_data element;
1512
1513 memset(&element, 0, sizeof(element));
1514 ether_addr_copy(element.mac_addr, hw->mac.addr);
1515 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1516 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1517 } else {
1518 spin_lock_bh(&vsi->mac_filter_list_lock);
1519 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1520 false, false);
1521 if (f)
1522 f->is_laa = true;
1523 spin_unlock_bh(&vsi->mac_filter_list_lock);
1524 }
1525
1526 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1527
1528 /* schedule our worker thread which will take care of
1529 * applying the new filter changes
1530 */
1531 i40e_service_event_schedule(vsi->back);
1532 return 0;
1533 }
1534
1535 /**
1536 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1537 * @vsi: the VSI being setup
1538 * @ctxt: VSI context structure
1539 * @enabled_tc: Enabled TCs bitmap
1540 * @is_add: True if called before Add VSI
1541 *
1542 * Setup VSI queue mapping for enabled traffic classes.
1543 **/
1544 #ifdef I40E_FCOE
1545 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1546 struct i40e_vsi_context *ctxt,
1547 u8 enabled_tc,
1548 bool is_add)
1549 #else
1550 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1551 struct i40e_vsi_context *ctxt,
1552 u8 enabled_tc,
1553 bool is_add)
1554 #endif
1555 {
1556 struct i40e_pf *pf = vsi->back;
1557 u16 sections = 0;
1558 u8 netdev_tc = 0;
1559 u16 numtc = 0;
1560 u16 qcount;
1561 u8 offset;
1562 u16 qmap;
1563 int i;
1564 u16 num_tc_qps = 0;
1565
1566 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1567 offset = 0;
1568
1569 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1570 /* Find numtc from enabled TC bitmap */
1571 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1572 if (enabled_tc & BIT(i)) /* TC is enabled */
1573 numtc++;
1574 }
1575 if (!numtc) {
1576 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1577 numtc = 1;
1578 }
1579 } else {
1580 /* At least TC0 is enabled in case of non-DCB case */
1581 numtc = 1;
1582 }
1583
1584 vsi->tc_config.numtc = numtc;
1585 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1586 /* Number of queues per enabled TC */
1587 /* In the MFP case we can have a much lower count of MSI-X
1588 * vectors available, so we need to lower the used
1589 * queue count.
1590 */
1591 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1592 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1593 else
1594 qcount = vsi->alloc_queue_pairs;
1595 num_tc_qps = qcount / numtc;
1596 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1597
1598 /* Setup queue offset/count for all TCs for given VSI */
1599 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1600 /* See if the given TC is enabled for the given VSI */
1601 if (vsi->tc_config.enabled_tc & BIT(i)) {
1602 /* TC is enabled */
1603 int pow, num_qps;
1604
1605 switch (vsi->type) {
1606 case I40E_VSI_MAIN:
1607 qcount = min_t(int, pf->alloc_rss_size,
1608 num_tc_qps);
1609 break;
1610 #ifdef I40E_FCOE
1611 case I40E_VSI_FCOE:
1612 qcount = num_tc_qps;
1613 break;
1614 #endif
1615 case I40E_VSI_FDIR:
1616 case I40E_VSI_SRIOV:
1617 case I40E_VSI_VMDQ2:
1618 default:
1619 qcount = num_tc_qps;
1620 WARN_ON(i != 0);
1621 break;
1622 }
1623 vsi->tc_config.tc_info[i].qoffset = offset;
1624 vsi->tc_config.tc_info[i].qcount = qcount;
1625
1626 /* find the next higher power-of-2 of num queue pairs */
1627 num_qps = qcount;
1628 pow = 0;
1629 while (num_qps && (BIT_ULL(pow) < qcount)) {
1630 pow++;
1631 num_qps >>= 1;
1632 }
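/* Illustrative example (not part of the original source): for qcount = 3
 * the loop above exits with pow = 2, so the qmap built below advertises a
 * power-of-two region of 4 queue slots for this TC.
 */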
1633
1634 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1635 qmap =
1636 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1637 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1638
1639 offset += qcount;
1640 } else {
1641 /* TC is not enabled so set the offset to
1642 * default queue and allocate one queue
1643 * for the given TC.
1644 */
1645 vsi->tc_config.tc_info[i].qoffset = 0;
1646 vsi->tc_config.tc_info[i].qcount = 1;
1647 vsi->tc_config.tc_info[i].netdev_tc = 0;
1648
1649 qmap = 0;
1650 }
1651 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1652 }
1653
1654 /* Set actual Tx/Rx queue pairs */
1655 vsi->num_queue_pairs = offset;
1656 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1657 if (vsi->req_queue_pairs > 0)
1658 vsi->num_queue_pairs = vsi->req_queue_pairs;
1659 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1660 vsi->num_queue_pairs = pf->num_lan_msix;
1661 }
1662
1663 /* Scheduler section valid can only be set for ADD VSI */
1664 if (is_add) {
1665 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1666
1667 ctxt->info.up_enable_bits = enabled_tc;
1668 }
1669 if (vsi->type == I40E_VSI_SRIOV) {
1670 ctxt->info.mapping_flags |=
1671 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1672 for (i = 0; i < vsi->num_queue_pairs; i++)
1673 ctxt->info.queue_mapping[i] =
1674 cpu_to_le16(vsi->base_queue + i);
1675 } else {
1676 ctxt->info.mapping_flags |=
1677 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1678 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1679 }
1680 ctxt->info.valid_sections |= cpu_to_le16(sections);
1681 }
1682
1683 /**
1684 * i40e_set_rx_mode - NDO callback to set the netdev filters
1685 * @netdev: network interface device structure
1686 **/
1687 #ifdef I40E_FCOE
1688 void i40e_set_rx_mode(struct net_device *netdev)
1689 #else
1690 static void i40e_set_rx_mode(struct net_device *netdev)
1691 #endif
1692 {
1693 struct i40e_netdev_priv *np = netdev_priv(netdev);
1694 struct i40e_mac_filter *f, *ftmp;
1695 struct i40e_vsi *vsi = np->vsi;
1696 struct netdev_hw_addr *uca;
1697 struct netdev_hw_addr *mca;
1698 struct netdev_hw_addr *ha;
1699
1700 spin_lock_bh(&vsi->mac_filter_list_lock);
1701
1702 /* add addr if not already in the filter list */
1703 netdev_for_each_uc_addr(uca, netdev) {
1704 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1705 if (i40e_is_vsi_in_vlan(vsi))
1706 i40e_put_mac_in_vlan(vsi, uca->addr,
1707 false, true);
1708 else
1709 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1710 false, true);
1711 }
1712 }
1713
1714 netdev_for_each_mc_addr(mca, netdev) {
1715 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1716 if (i40e_is_vsi_in_vlan(vsi))
1717 i40e_put_mac_in_vlan(vsi, mca->addr,
1718 false, true);
1719 else
1720 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1721 false, true);
1722 }
1723 }
1724
1725 /* remove filter if not in netdev list */
1726 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1727
1728 if (!f->is_netdev)
1729 continue;
1730
1731 netdev_for_each_mc_addr(mca, netdev)
1732 if (ether_addr_equal(mca->addr, f->macaddr))
1733 goto bottom_of_search_loop;
1734
1735 netdev_for_each_uc_addr(uca, netdev)
1736 if (ether_addr_equal(uca->addr, f->macaddr))
1737 goto bottom_of_search_loop;
1738
1739 for_each_dev_addr(netdev, ha)
1740 if (ether_addr_equal(ha->addr, f->macaddr))
1741 goto bottom_of_search_loop;
1742
1743 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1744 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1745
1746 bottom_of_search_loop:
1747 continue;
1748 }
1749 spin_unlock_bh(&vsi->mac_filter_list_lock);
1750
1751 /* check for other flag changes */
1752 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1753 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1754 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1755 }
1756
1757 /* schedule our worker thread which will take care of
1758 * applying the new filter changes
1759 */
1760 i40e_service_event_schedule(vsi->back);
1761 }
1762
1763 /**
1764 * i40e_mac_filter_entry_clone - Clones a MAC filter entry
1765 * @src: source MAC filter entry to be cloned
1766 *
1767 * Returns the pointer to newly cloned MAC filter entry or NULL
1768 * in case of error
1769 **/
1770 static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
1771 struct i40e_mac_filter *src)
1772 {
1773 struct i40e_mac_filter *f;
1774
1775 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1776 if (!f)
1777 return NULL;
1778 *f = *src;
1779
1780 INIT_LIST_HEAD(&f->list);
1781
1782 return f;
1783 }
1784
1785 /**
1786 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1787 * @vsi: pointer to vsi struct
1788 * @from: Pointer to the list which contains MAC filter entries - changes to
1789 * those entries need to be undone.
1790 *
1791 * MAC filter entries from the list were slated to be removed from the device.
1792 **/
1793 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1794 struct list_head *from)
1795 {
1796 struct i40e_mac_filter *f, *ftmp;
1797
1798 list_for_each_entry_safe(f, ftmp, from, list) {
1799 f->changed = true;
1800 /* Move the element back into MAC filter list*/
1801 list_move_tail(&f->list, &vsi->mac_filter_list);
1802 }
1803 }
1804
1805 /**
1806 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1807 * @vsi: pointer to vsi struct
1808 *
1809 * MAC filter entries from the list were slated to be added to the device.
1810 **/
1811 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
1812 {
1813 struct i40e_mac_filter *f, *ftmp;
1814
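	/* A filter with a nonzero counter is still referenced; mark it as
	 * changed again so the next filter sync pass retries pushing it.
	 */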
1815 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1816 if (!f->changed && f->counter)
1817 f->changed = true;
1818 }
1819 }
1820
1821 /**
1822 * i40e_cleanup_add_list - Deletes the elements from the add list and releases
1823 * their memory
1824 * @add_list: Pointer to list which contains MAC filter entries
1825 **/
1826 static void i40e_cleanup_add_list(struct list_head *add_list)
1827 {
1828 struct i40e_mac_filter *f, *ftmp;
1829
1830 list_for_each_entry_safe(f, ftmp, add_list, list) {
1831 list_del(&f->list);
1832 kfree(f);
1833 }
1834 }
1835
1836 /**
1837 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1838 * @vsi: ptr to the VSI
1839 *
1840 * Push any outstanding VSI filter changes through the AdminQ.
1841 *
1842 * Returns 0 or error value
1843 **/
1844 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1845 {
1846 struct list_head tmp_del_list, tmp_add_list;
1847 struct i40e_mac_filter *f, *ftmp, *fclone;
1848 bool promisc_forced_on = false;
1849 bool add_happened = false;
1850 int filter_list_len = 0;
1851 u32 changed_flags = 0;
1852 i40e_status aq_ret = 0;
1853 bool err_cond = false;
1854 int retval = 0;
1855 struct i40e_pf *pf;
1856 int num_add = 0;
1857 int num_del = 0;
1858 int aq_err = 0;
1859 u16 cmd_flags;
1860
1861 /* empty array typed pointers, kzalloc'ed later */
1862 struct i40e_aqc_add_macvlan_element_data *add_list;
1863 struct i40e_aqc_remove_macvlan_element_data *del_list;
1864
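	/* serialize this filter sync against any other configuration change
	 * already in progress on this VSI
	 */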
1865 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1866 usleep_range(1000, 2000);
1867 pf = vsi->back;
1868
1869 if (vsi->netdev) {
1870 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1871 vsi->current_netdev_flags = vsi->netdev->flags;
1872 }
1873
1874 INIT_LIST_HEAD(&tmp_del_list);
1875 INIT_LIST_HEAD(&tmp_add_list);
1876
1877 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1878 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1879
1880 spin_lock_bh(&vsi->mac_filter_list_lock);
1881 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1882 if (!f->changed)
1883 continue;
1884
1885 if (f->counter != 0)
1886 continue;
1887 f->changed = false;
1888
1889 /* Move the element into temporary del_list */
1890 list_move_tail(&f->list, &tmp_del_list);
1891 }
1892
1893 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1894 if (!f->changed)
1895 continue;
1896
1897 if (f->counter == 0)
1898 continue;
1899 f->changed = false;
1900
1901 /* Clone MAC filter entry and add into temporary list */
1902 fclone = i40e_mac_filter_entry_clone(f);
1903 if (!fclone) {
1904 err_cond = true;
1905 break;
1906 }
1907 list_add_tail(&fclone->list, &tmp_add_list);
1908 }
1909
1910 /* if failed to clone MAC filter entry - undo */
1911 if (err_cond) {
1912 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1913 i40e_undo_add_filter_entries(vsi);
1914 }
1915 spin_unlock_bh(&vsi->mac_filter_list_lock);
1916
1917 if (err_cond) {
1918 i40e_cleanup_add_list(&tmp_add_list);
1919 retval = -ENOMEM;
1920 goto out;
1921 }
1922 }
1923
1924 /* Now process 'del_list' outside the lock */
1925 if (!list_empty(&tmp_del_list)) {
1926 int del_list_size;
1927
1928 filter_list_len = pf->hw.aq.asq_buf_size /
1929 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1930 del_list_size = filter_list_len *
1931 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1932 del_list = kzalloc(del_list_size, GFP_ATOMIC);
1933 if (!del_list) {
1934 i40e_cleanup_add_list(&tmp_add_list);
1935
1936 /* Undo VSI's MAC filter entry element updates */
1937 spin_lock_bh(&vsi->mac_filter_list_lock);
1938 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1939 i40e_undo_add_filter_entries(vsi);
1940 spin_unlock_bh(&vsi->mac_filter_list_lock);
1941 retval = -ENOMEM;
1942 goto out;
1943 }
1944
1945 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
1946 cmd_flags = 0;
1947
1948 /* add to delete list */
1949 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1950 del_list[num_del].vlan_tag =
1951 cpu_to_le16((u16)(f->vlan ==
1952 I40E_VLAN_ANY ? 0 : f->vlan));
1953
1954 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1955 del_list[num_del].flags = cmd_flags;
1956 num_del++;
1957
1958 /* flush a full buffer */
1959 if (num_del == filter_list_len) {
1960 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1961 vsi->seid,
1962 del_list,
1963 num_del,
1964 NULL);
1965 aq_err = pf->hw.aq.asq_last_status;
1966 num_del = 0;
1967 memset(del_list, 0, del_list_size);
1968
1969 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
1970 retval = -EIO;
1971 dev_err(&pf->pdev->dev,
1972 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1973 i40e_stat_str(&pf->hw, aq_ret),
1974 i40e_aq_str(&pf->hw, aq_err));
1975 }
1976 }
1977 /* Release memory for MAC filter entries which were
1978 * synced up with HW.
1979 */
1980 list_del(&f->list);
1981 kfree(f);
1982 }
1983
1984 if (num_del) {
1985 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1986 del_list, num_del,
1987 NULL);
1988 aq_err = pf->hw.aq.asq_last_status;
1989 num_del = 0;
1990
1991 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
1992 dev_info(&pf->pdev->dev,
1993 "ignoring delete macvlan error, err %s aq_err %s\n",
1994 i40e_stat_str(&pf->hw, aq_ret),
1995 i40e_aq_str(&pf->hw, aq_err));
1996 }
1997
1998 kfree(del_list);
1999 del_list = NULL;
2000 }
2001
2002 if (!list_empty(&tmp_add_list)) {
2003 int add_list_size;
2004
2005 /* do all the adds now */
2006 filter_list_len = pf->hw.aq.asq_buf_size /
2007 sizeof(struct i40e_aqc_add_macvlan_element_data);
2008 add_list_size = filter_list_len *
2009 sizeof(struct i40e_aqc_add_macvlan_element_data);
2010 add_list = kzalloc(add_list_size, GFP_ATOMIC);
2011 if (!add_list) {
2012 /* Purge element from temporary lists */
2013 i40e_cleanup_add_list(&tmp_add_list);
2014
2015 /* Undo add filter entries from VSI MAC filter list */
2016 spin_lock_bh(&vsi->mac_filter_list_lock);
2017 i40e_undo_add_filter_entries(vsi);
2018 spin_unlock_bh(&vsi->mac_filter_list_lock);
2019 retval = -ENOMEM;
2020 goto out;
2021 }
2022
2023 list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2024
2025 add_happened = true;
2026 cmd_flags = 0;
2027
2028 /* add to add array */
2029 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
2030 add_list[num_add].vlan_tag =
2031 cpu_to_le16(
2032 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
2033 add_list[num_add].queue_number = 0;
2034
2035 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2036 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2037 num_add++;
2038
2039 /* flush a full buffer */
2040 if (num_add == filter_list_len) {
2041 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2042 add_list, num_add,
2043 NULL);
2044 aq_err = pf->hw.aq.asq_last_status;
2045 num_add = 0;
2046
2047 if (aq_ret)
2048 break;
2049 memset(add_list, 0, add_list_size);
2050 }
2051 /* Entries from tmp_add_list were cloned from MAC
2052 * filter list, hence clean those cloned entries
2053 */
2054 list_del(&f->list);
2055 kfree(f);
2056 }
2057
2058 if (num_add) {
2059 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2060 add_list, num_add, NULL);
2061 aq_err = pf->hw.aq.asq_last_status;
2062 num_add = 0;
2063 }
2064 kfree(add_list);
2065 add_list = NULL;
2066
2067 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
2068 retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
2069 dev_info(&pf->pdev->dev,
2070 "add filter failed, err %s aq_err %s\n",
2071 i40e_stat_str(&pf->hw, aq_ret),
2072 i40e_aq_str(&pf->hw, aq_err));
2073 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
2074 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2075 &vsi->state)) {
2076 promisc_forced_on = true;
2077 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2078 &vsi->state);
2079 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
2080 }
2081 }
2082 }
2083
2084 /* if the VF is not trusted do not do promisc */
2085 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2086 clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
2087 goto out;
2088 }
2089
2090 /* check for changes in promiscuous modes */
2091 if (changed_flags & IFF_ALLMULTI) {
2092 bool cur_multipromisc;
2093
2094 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2095 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2096 vsi->seid,
2097 cur_multipromisc,
2098 NULL);
2099 if (aq_ret) {
2100 retval = i40e_aq_rc_to_posix(aq_ret,
2101 pf->hw.aq.asq_last_status);
2102 dev_info(&pf->pdev->dev,
2103 "set multi promisc failed, err %s aq_err %s\n",
2104 i40e_stat_str(&pf->hw, aq_ret),
2105 i40e_aq_str(&pf->hw,
2106 pf->hw.aq.asq_last_status));
2107 }
2108 }
2109 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
2110 bool cur_promisc;
2111
2112 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2113 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2114 &vsi->state));
2115 if ((vsi->type == I40E_VSI_MAIN) &&
2116 (pf->lan_veb != I40E_NO_VEB) &&
2117 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2118 /* set defport ON for Main VSI instead of true promisc
2119 * this way we will get all unicast/multicast and VLAN
2120 * promisc behavior but will not get VF or VMDq traffic
2121 * replicated on the Main VSI.
2122 */
2123 if (pf->cur_promisc != cur_promisc) {
2124 pf->cur_promisc = cur_promisc;
2125 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2126 }
2127 } else {
2128 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2129 &vsi->back->hw,
2130 vsi->seid,
2131 cur_promisc, NULL);
2132 if (aq_ret) {
2133 retval =
2134 i40e_aq_rc_to_posix(aq_ret,
2135 pf->hw.aq.asq_last_status);
2136 dev_info(&pf->pdev->dev,
2137 "set unicast promisc failed, err %d, aq_err %d\n",
2138 aq_ret, pf->hw.aq.asq_last_status);
2139 }
2140 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2141 &vsi->back->hw,
2142 vsi->seid,
2143 cur_promisc, NULL);
2144 if (aq_ret) {
2145 retval =
2146 i40e_aq_rc_to_posix(aq_ret,
2147 pf->hw.aq.asq_last_status);
2148 dev_info(&pf->pdev->dev,
2149 "set multicast promisc failed, err %d, aq_err %d\n",
2150 aq_ret, pf->hw.aq.asq_last_status);
2151 }
2152 }
2153 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2154 vsi->seid,
2155 cur_promisc, NULL);
2156 if (aq_ret) {
2157 retval = i40e_aq_rc_to_posix(aq_ret,
2158 pf->hw.aq.asq_last_status);
2159 dev_info(&pf->pdev->dev,
2160 "set brdcast promisc failed, err %s, aq_err %s\n",
2161 i40e_stat_str(&pf->hw, aq_ret),
2162 i40e_aq_str(&pf->hw,
2163 pf->hw.aq.asq_last_status));
2164 }
2165 }
2166 out:
2167 /* if something went wrong then set the changed flag so we try again */
2168 if (retval)
2169 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2170
2171 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2172 return retval;
2173 }
2174
2175 /**
2176 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2177 * @pf: board private structure
2178 **/
2179 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2180 {
2181 int v;
2182
2183 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2184 return;
2185 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2186
2187 for (v = 0; v < pf->num_alloc_vsi; v++) {
2188 if (pf->vsi[v] &&
2189 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2190 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2191
2192 if (ret) {
2193 /* come back and try again later */
2194 pf->flags |= I40E_FLAG_FILTER_SYNC;
2195 break;
2196 }
2197 }
2198 }
2199 }
2200
2201 /**
2202 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2203 * @netdev: network interface device structure
2204 * @new_mtu: new value for maximum frame size
2205 *
2206 * Returns 0 on success, negative on failure
2207 **/
2208 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2209 {
2210 struct i40e_netdev_priv *np = netdev_priv(netdev);
2211 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2212 struct i40e_vsi *vsi = np->vsi;
2213
2214 /* MTU < 68 is an error and causes problems on some kernels */
2215 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2216 return -EINVAL;
2217
2218 netdev_info(netdev, "changing MTU from %d to %d\n",
2219 netdev->mtu, new_mtu);
2220 netdev->mtu = new_mtu;
2221 if (netif_running(netdev))
2222 i40e_vsi_reinit_locked(vsi);
2223 i40e_notify_client_of_l2_param_changes(vsi);
2224 return 0;
2225 }
2226
2227 /**
2228 * i40e_ioctl - Access the hwtstamp interface
2229 * @netdev: network interface device structure
2230 * @ifr: interface request data
2231 * @cmd: ioctl command
2232 **/
2233 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2234 {
2235 struct i40e_netdev_priv *np = netdev_priv(netdev);
2236 struct i40e_pf *pf = np->vsi->back;
2237
2238 switch (cmd) {
2239 case SIOCGHWTSTAMP:
2240 return i40e_ptp_get_ts_config(pf, ifr);
2241 case SIOCSHWTSTAMP:
2242 return i40e_ptp_set_ts_config(pf, ifr);
2243 default:
2244 return -EOPNOTSUPP;
2245 }
2246 }
2247
2248 /**
2249 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2250 * @vsi: the vsi being adjusted
2251 **/
2252 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2253 {
2254 struct i40e_vsi_context ctxt;
2255 i40e_status ret;
2256
2257 if ((vsi->info.valid_sections &
2258 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2259 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2260 return; /* already enabled */
2261
2262 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2263 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2264 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2265
2266 ctxt.seid = vsi->seid;
2267 ctxt.info = vsi->info;
2268 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2269 if (ret) {
2270 dev_info(&vsi->back->pdev->dev,
2271 "update vlan stripping failed, err %s aq_err %s\n",
2272 i40e_stat_str(&vsi->back->hw, ret),
2273 i40e_aq_str(&vsi->back->hw,
2274 vsi->back->hw.aq.asq_last_status));
2275 }
2276 }
2277
2278 /**
2279 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2280 * @vsi: the vsi being adjusted
2281 **/
2282 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2283 {
2284 struct i40e_vsi_context ctxt;
2285 i40e_status ret;
2286
2287 if ((vsi->info.valid_sections &
2288 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2289 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2290 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2291 return; /* already disabled */
2292
2293 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2294 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2295 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2296
2297 ctxt.seid = vsi->seid;
2298 ctxt.info = vsi->info;
2299 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2300 if (ret) {
2301 dev_info(&vsi->back->pdev->dev,
2302 "update vlan stripping failed, err %s aq_err %s\n",
2303 i40e_stat_str(&vsi->back->hw, ret),
2304 i40e_aq_str(&vsi->back->hw,
2305 vsi->back->hw.aq.asq_last_status));
2306 }
2307 }
2308
2309 /**
2310 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2311 * @netdev: network interface to be adjusted
2312 * @features: netdev features to test if VLAN offload is enabled or not
2313 **/
2314 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2315 {
2316 struct i40e_netdev_priv *np = netdev_priv(netdev);
2317 struct i40e_vsi *vsi = np->vsi;
2318
2319 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2320 i40e_vlan_stripping_enable(vsi);
2321 else
2322 i40e_vlan_stripping_disable(vsi);
2323 }
2324
2325 /**
2326 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2327 * @vsi: the vsi being configured
2328 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2329 **/
2330 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2331 {
2332 struct i40e_mac_filter *f, *add_f;
2333 bool is_netdev, is_vf;
2334
2335 is_vf = (vsi->type == I40E_VSI_SRIOV);
2336 is_netdev = !!(vsi->netdev);
2337
2338 /* Locked once because all functions invoked below iterate the list */
2339 spin_lock_bh(&vsi->mac_filter_list_lock);
2340
2341 if (is_netdev) {
2342 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2343 is_vf, is_netdev);
2344 if (!add_f) {
2345 dev_info(&vsi->back->pdev->dev,
2346 "Could not add vlan filter %d for %pM\n",
2347 vid, vsi->netdev->dev_addr);
2348 spin_unlock_bh(&vsi->mac_filter_list_lock);
2349 return -ENOMEM;
2350 }
2351 }
2352
2353 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2354 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2355 if (!add_f) {
2356 dev_info(&vsi->back->pdev->dev,
2357 "Could not add vlan filter %d for %pM\n",
2358 vid, f->macaddr);
2359 spin_unlock_bh(&vsi->mac_filter_list_lock);
2360 return -ENOMEM;
2361 }
2362 }
2363
2364 /* Now if we add a vlan tag, make sure to check if it is the first
2365 * tag (i.e. a "tag" of -1 exists) and if so replace the -1 "tag"
2366 * with 0, so we now accept untagged and specified tagged traffic
2367 * (and not any tagged and untagged)
2368 */
2369 if (vid > 0) {
2370 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2371 I40E_VLAN_ANY,
2372 is_vf, is_netdev)) {
2373 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2374 I40E_VLAN_ANY, is_vf, is_netdev);
2375 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2376 is_vf, is_netdev);
2377 if (!add_f) {
2378 dev_info(&vsi->back->pdev->dev,
2379 "Could not add filter 0 for %pM\n",
2380 vsi->netdev->dev_addr);
2381 spin_unlock_bh(&vsi->mac_filter_list_lock);
2382 return -ENOMEM;
2383 }
2384 }
2385 }
2386
2387 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2388 if (vid > 0 && !vsi->info.pvid) {
2389 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2390 if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2391 is_vf, is_netdev))
2392 continue;
2393 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2394 is_vf, is_netdev);
2395 add_f = i40e_add_filter(vsi, f->macaddr,
2396 0, is_vf, is_netdev);
2397 if (!add_f) {
2398 dev_info(&vsi->back->pdev->dev,
2399 "Could not add filter 0 for %pM\n",
2400 f->macaddr);
2401 spin_unlock_bh(&vsi->mac_filter_list_lock);
2402 return -ENOMEM;
2403 }
2404 }
2405 }
2406
2407 spin_unlock_bh(&vsi->mac_filter_list_lock);
2408
2409 /* schedule our worker thread which will take care of
2410 * applying the new filter changes
2411 */
2412 i40e_service_event_schedule(vsi->back);
2413 return 0;
2414 }
2415
2416 /**
2417 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2418 * @vsi: the vsi being configured
2419 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2420 *
2421 * Return: 0 on success or negative otherwise
2422 **/
2423 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2424 {
2425 struct net_device *netdev = vsi->netdev;
2426 struct i40e_mac_filter *f, *add_f;
2427 bool is_vf, is_netdev;
2428 int filter_count = 0;
2429
2430 is_vf = (vsi->type == I40E_VSI_SRIOV);
2431 is_netdev = !!(netdev);
2432
2433 /* Locked once because all functions invoked below iterate the list */
2434 spin_lock_bh(&vsi->mac_filter_list_lock);
2435
2436 if (is_netdev)
2437 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2438
2439 list_for_each_entry(f, &vsi->mac_filter_list, list)
2440 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2441
2442 /* go through all the filters for this VSI and if there is only
2443 * vid == 0 it means there are no other filters, so vid 0 must
2444 * be replaced with -1. This signifies that we should from now
2445 * on accept any traffic (with any tag present, or untagged)
2446 */
2447 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2448 if (is_netdev) {
2449 if (f->vlan &&
2450 ether_addr_equal(netdev->dev_addr, f->macaddr))
2451 filter_count++;
2452 }
2453
2454 if (f->vlan)
2455 filter_count++;
2456 }
2457
2458 if (!filter_count && is_netdev) {
2459 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2460 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2461 is_vf, is_netdev);
2462 if (!f) {
2463 dev_info(&vsi->back->pdev->dev,
2464 "Could not add filter %d for %pM\n",
2465 I40E_VLAN_ANY, netdev->dev_addr);
2466 spin_unlock_bh(&vsi->mac_filter_list_lock);
2467 return -ENOMEM;
2468 }
2469 }
2470
2471 if (!filter_count) {
2472 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2473 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2474 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2475 is_vf, is_netdev);
2476 if (!add_f) {
2477 dev_info(&vsi->back->pdev->dev,
2478 "Could not add filter %d for %pM\n",
2479 I40E_VLAN_ANY, f->macaddr);
2480 spin_unlock_bh(&vsi->mac_filter_list_lock);
2481 return -ENOMEM;
2482 }
2483 }
2484 }
2485
2486 spin_unlock_bh(&vsi->mac_filter_list_lock);
2487
2488 /* schedule our worker thread which will take care of
2489 * applying the new filter changes
2490 */
2491 i40e_service_event_schedule(vsi->back);
2492 return 0;
2493 }
2494
2495 /**
2496 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2497 * @netdev: network interface to be adjusted
2498 * @vid: vlan id to be added
2499 *
2500 * net_device_ops implementation for adding vlan ids
2501 **/
2502 #ifdef I40E_FCOE
2503 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2504 __always_unused __be16 proto, u16 vid)
2505 #else
2506 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2507 __always_unused __be16 proto, u16 vid)
2508 #endif
2509 {
2510 struct i40e_netdev_priv *np = netdev_priv(netdev);
2511 struct i40e_vsi *vsi = np->vsi;
2512 int ret = 0;
2513
2514 if (vid > 4095)
2515 return -EINVAL;
2516
2517 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2518
2519 /* If the network stack called us with vid = 0 then
2520 * it is asking to receive priority tagged packets with
2521 * vlan id 0. Our HW receives them by default when configured
2522 * to receive untagged packets so there is no need to add an
2523 * extra filter for vlan 0 tagged packets.
2524 */
2525 if (vid)
2526 ret = i40e_vsi_add_vlan(vsi, vid);
2527
2528 if (!ret && (vid < VLAN_N_VID))
2529 set_bit(vid, vsi->active_vlans);
2530
2531 return ret;
2532 }
2533
2534 /**
2535 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2536 * @netdev: network interface to be adjusted
2537 * @vid: vlan id to be removed
2538 *
2539 * net_device_ops implementation for removing vlan ids
2540 **/
2541 #ifdef I40E_FCOE
2542 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2543 __always_unused __be16 proto, u16 vid)
2544 #else
2545 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2546 __always_unused __be16 proto, u16 vid)
2547 #endif
2548 {
2549 struct i40e_netdev_priv *np = netdev_priv(netdev);
2550 struct i40e_vsi *vsi = np->vsi;
2551
2552 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2553
2554 /* return code is ignored as there is nothing a user
2555 * can do about failure to remove and a log message was
2556 * already printed from the other function
2557 */
2558 i40e_vsi_kill_vlan(vsi, vid);
2559
2560 clear_bit(vid, vsi->active_vlans);
2561
2562 return 0;
2563 }
2564
2565 /**
2566 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2567 * @vsi: the vsi being brought back up
2568 **/
2569 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2570 {
2571 u16 vid;
2572
2573 if (!vsi->netdev)
2574 return;
2575
2576 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2577
2578 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2579 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2580 vid);
2581 }
2582
2583 /**
2584 * i40e_vsi_add_pvid - Add pvid for the VSI
2585 * @vsi: the vsi being adjusted
2586 * @vid: the vlan id to set as a PVID
2587 **/
2588 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2589 {
2590 struct i40e_vsi_context ctxt;
2591 i40e_status ret;
2592
2593 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2594 vsi->info.pvid = cpu_to_le16(vid);
2595 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2596 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2597 I40E_AQ_VSI_PVLAN_EMOD_STR;
2598
2599 ctxt.seid = vsi->seid;
2600 ctxt.info = vsi->info;
2601 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2602 if (ret) {
2603 dev_info(&vsi->back->pdev->dev,
2604 "add pvid failed, err %s aq_err %s\n",
2605 i40e_stat_str(&vsi->back->hw, ret),
2606 i40e_aq_str(&vsi->back->hw,
2607 vsi->back->hw.aq.asq_last_status));
2608 return -ENOENT;
2609 }
2610
2611 return 0;
2612 }
2613
2614 /**
2615 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2616 * @vsi: the vsi being adjusted
2617 *
2618 * Just use the vlan_rx_register() service to put it back to normal
2619 **/
2620 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2621 {
2622 i40e_vlan_stripping_disable(vsi);
2623
2624 vsi->info.pvid = 0;
2625 }
2626
2627 /**
2628 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2629 * @vsi: ptr to the VSI
2630 *
2631 * If this function returns with an error, then it's possible one or
2632 * more of the rings is populated (while the rest are not). It is the
2633 * caller's duty to clean those orphaned rings.
2634 *
2635 * Return 0 on success, negative on failure
2636 **/
2637 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2638 {
2639 int i, err = 0;
2640
2641 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2642 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2643
2644 return err;
2645 }
2646
2647 /**
2648 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2649 * @vsi: ptr to the VSI
2650 *
2651 * Free VSI's transmit software resources
2652 **/
2653 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2654 {
2655 int i;
2656
2657 if (!vsi->tx_rings)
2658 return;
2659
2660 for (i = 0; i < vsi->num_queue_pairs; i++)
2661 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2662 i40e_free_tx_resources(vsi->tx_rings[i]);
2663 }
2664
2665 /**
2666 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2667 * @vsi: ptr to the VSI
2668 *
2669 * If this function returns with an error, then it's possible one or
2670 * more of the rings is populated (while the rest are not). It is the
2671 * caller's duty to clean those orphaned rings.
2672 *
2673 * Return 0 on success, negative on failure
2674 **/
2675 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2676 {
2677 int i, err = 0;
2678
2679 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2680 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2681 #ifdef I40E_FCOE
2682 i40e_fcoe_setup_ddp_resources(vsi);
2683 #endif
2684 return err;
2685 }
2686
2687 /**
2688 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2689 * @vsi: ptr to the VSI
2690 *
2691 * Free all receive software resources
2692 **/
2693 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2694 {
2695 int i;
2696
2697 if (!vsi->rx_rings)
2698 return;
2699
2700 for (i = 0; i < vsi->num_queue_pairs; i++)
2701 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2702 i40e_free_rx_resources(vsi->rx_rings[i]);
2703 #ifdef I40E_FCOE
2704 i40e_fcoe_free_ddp_resources(vsi);
2705 #endif
2706 }
2707
2708 /**
2709 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2710 * @ring: The Tx ring to configure
2711 *
2712 * This enables/disables XPS for a given Tx descriptor ring
2713 * based on the TCs enabled for the VSI that ring belongs to.
2714 **/
2715 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2716 {
2717 struct i40e_vsi *vsi = ring->vsi;
2718 cpumask_var_t mask;
2719
2720 if (!ring->q_vector || !ring->netdev)
2721 return;
2722
2723 /* Single TC mode enable XPS */
2724 if (vsi->tc_config.numtc <= 1) {
2725 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2726 netif_set_xps_queue(ring->netdev,
2727 &ring->q_vector->affinity_mask,
2728 ring->queue_index);
2729 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2730 /* Disable XPS to allow selection based on TC */
2731 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2732 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2733 free_cpumask_var(mask);
2734 }
2735
2736 /* schedule our worker thread which will take care of
2737 * applying the new filter changes
2738 */
2739 i40e_service_event_schedule(vsi->back);
2740 }
2741
2742 /**
2743 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the ring setup
2744 * @ring: The Tx ring to configure
2745 *
2746 * Configure the Tx descriptor ring in the HMC context.
2747 **/
2748 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2749 {
2750 struct i40e_vsi *vsi = ring->vsi;
2751 u16 pf_q = vsi->base_queue + ring->queue_index;
2752 struct i40e_hw *hw = &vsi->back->hw;
2753 struct i40e_hmc_obj_txq tx_ctx;
2754 i40e_status err = 0;
2755 u32 qtx_ctl = 0;
2756
2757 /* some ATR related tx ring init */
2758 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2759 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2760 ring->atr_count = 0;
2761 } else {
2762 ring->atr_sample_rate = 0;
2763 }
2764
2765 /* configure XPS */
2766 i40e_config_xps_tx_ring(ring);
2767
2768 /* clear the context structure first */
2769 memset(&tx_ctx, 0, sizeof(tx_ctx));
2770
2771 tx_ctx.new_context = 1;
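	/* the ring base address is programmed in 128-byte units */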
2772 tx_ctx.base = (ring->dma / 128);
2773 tx_ctx.qlen = ring->count;
2774 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2775 I40E_FLAG_FD_ATR_ENABLED));
2776 #ifdef I40E_FCOE
2777 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2778 #endif
2779 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2780 /* FDIR VSI tx ring can still use RS bit and writebacks */
2781 if (vsi->type != I40E_VSI_FDIR)
2782 tx_ctx.head_wb_ena = 1;
2783 tx_ctx.head_wb_addr = ring->dma +
2784 (ring->count * sizeof(struct i40e_tx_desc));
2785
2786 /* As part of VSI creation/update, FW allocates certain
2787 * Tx arbitration queue sets for each TC enabled for
2788 * the VSI. The FW returns the handles to these queue
2789 * sets as part of the response buffer to Add VSI,
2790 * Update VSI, etc. AQ commands. It is expected that
2791 * these queue set handles be associated with the Tx
2792 * queues by the driver as part of the TX queue context
2793 * initialization. This has to be done regardless of
2794 * DCB as by default everything is mapped to TC0.
2795 */
2796 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2797 tx_ctx.rdylist_act = 0;
2798
2799 /* clear the context in the HMC */
2800 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2801 if (err) {
2802 dev_info(&vsi->back->pdev->dev,
2803 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2804 ring->queue_index, pf_q, err);
2805 return -ENOMEM;
2806 }
2807
2808 /* set the context in the HMC */
2809 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2810 if (err) {
2811 dev_info(&vsi->back->pdev->dev,
2812 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2813 ring->queue_index, pf_q, err);
2814 return -ENOMEM;
2815 }
2816
2817 /* Now associate this queue with this PCI function */
2818 if (vsi->type == I40E_VSI_VMDQ2) {
2819 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2820 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2821 I40E_QTX_CTL_VFVM_INDX_MASK;
2822 } else {
2823 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2824 }
2825
2826 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2827 I40E_QTX_CTL_PF_INDX_MASK);
2828 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2829 i40e_flush(hw);
2830
2831 /* cache tail off for easier writes later */
2832 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2833
2834 return 0;
2835 }
2836
2837 /**
2838 * i40e_configure_rx_ring - Configure a receive ring context
2839 * @ring: The Rx ring to configure
2840 *
2841 * Configure the Rx descriptor ring in the HMC context.
2842 **/
2843 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2844 {
2845 struct i40e_vsi *vsi = ring->vsi;
2846 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2847 u16 pf_q = vsi->base_queue + ring->queue_index;
2848 struct i40e_hw *hw = &vsi->back->hw;
2849 struct i40e_hmc_obj_rxq rx_ctx;
2850 i40e_status err = 0;
2851
2852 ring->state = 0;
2853
2854 /* clear the context structure first */
2855 memset(&rx_ctx, 0, sizeof(rx_ctx));
2856
2857 ring->rx_buf_len = vsi->rx_buf_len;
2858 ring->rx_hdr_len = vsi->rx_hdr_len;
2859
2860 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2861 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2862
2863 rx_ctx.base = (ring->dma / 128);
2864 rx_ctx.qlen = ring->count;
2865
2866 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2867 set_ring_16byte_desc_enabled(ring);
2868 rx_ctx.dsize = 0;
2869 } else {
2870 rx_ctx.dsize = 1;
2871 }
2872
2873 rx_ctx.dtype = vsi->dtype;
2874 if (vsi->dtype) {
2875 set_ring_ps_enabled(ring);
2876 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2877 I40E_RX_SPLIT_IP |
2878 I40E_RX_SPLIT_TCP_UDP |
2879 I40E_RX_SPLIT_SCTP;
2880 } else {
2881 rx_ctx.hsplit_0 = 0;
2882 }
2883
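	/* cap the maximum frame size at what the buffer chain can hold */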
2884 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2885 (chain_len * ring->rx_buf_len));
2886 if (hw->revision_id == 0)
2887 rx_ctx.lrxqthresh = 0;
2888 else
2889 rx_ctx.lrxqthresh = 2;
2890 rx_ctx.crcstrip = 1;
2891 rx_ctx.l2tsel = 1;
2892 /* this controls whether VLAN is stripped from inner headers */
2893 rx_ctx.showiv = 0;
2894 #ifdef I40E_FCOE
2895 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2896 #endif
2897 /* set the prefena field to 1 because the manual says to */
2898 rx_ctx.prefena = 1;
2899
2900 /* clear the context in the HMC */
2901 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2902 if (err) {
2903 dev_info(&vsi->back->pdev->dev,
2904 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2905 ring->queue_index, pf_q, err);
2906 return -ENOMEM;
2907 }
2908
2909 /* set the context in the HMC */
2910 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2911 if (err) {
2912 dev_info(&vsi->back->pdev->dev,
2913 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2914 ring->queue_index, pf_q, err);
2915 return -ENOMEM;
2916 }
2917
2918 /* cache tail for quicker writes, and clear the reg before use */
2919 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2920 writel(0, ring->tail);
2921
2922 if (ring_is_ps_enabled(ring)) {
2923 i40e_alloc_rx_headers(ring);
2924 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
2925 } else {
2926 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
2927 }
2928
2929 return 0;
2930 }
2931
2932 /**
2933 * i40e_vsi_configure_tx - Configure the VSI for Tx
2934 * @vsi: VSI structure describing this set of rings and resources
2935 *
2936 * Configure the Tx VSI for operation.
2937 **/
2938 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2939 {
2940 int err = 0;
2941 u16 i;
2942
2943 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2944 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2945
2946 return err;
2947 }
2948
2949 /**
2950 * i40e_vsi_configure_rx - Configure the VSI for Rx
2951 * @vsi: the VSI being configured
2952 *
2953 * Configure the Rx VSI for operation.
2954 **/
2955 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2956 {
2957 int err = 0;
2958 u16 i;
2959
2960 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2961 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2962 + ETH_FCS_LEN + VLAN_HLEN;
2963 else
2964 vsi->max_frame = I40E_RXBUFFER_2048;
2965
2966 /* figure out correct receive buffer length */
2967 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2968 I40E_FLAG_RX_PS_ENABLED)) {
2969 case I40E_FLAG_RX_1BUF_ENABLED:
2970 vsi->rx_hdr_len = 0;
2971 vsi->rx_buf_len = vsi->max_frame;
2972 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2973 break;
2974 case I40E_FLAG_RX_PS_ENABLED:
2975 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2976 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2977 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2978 break;
2979 default:
2980 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2981 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2982 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2983 break;
2984 }
2985
2986 #ifdef I40E_FCOE
2987 /* setup rx buffer for FCoE */
2988 if ((vsi->type == I40E_VSI_FCOE) &&
2989 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2990 vsi->rx_hdr_len = 0;
2991 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2992 vsi->max_frame = I40E_RXBUFFER_3072;
2993 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2994 }
2995
2996 #endif /* I40E_FCOE */
2997 /* round up for the chip's needs */
2998 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2999 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
3000 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
3001 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3002
3003 /* set up individual rings */
3004 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3005 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3006
3007 return err;
3008 }
3009
3010 /**
3011 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3012 * @vsi: ptr to the VSI
3013 **/
3014 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3015 {
3016 struct i40e_ring *tx_ring, *rx_ring;
3017 u16 qoffset, qcount;
3018 int i, n;
3019
3020 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3021 /* Reset the TC information */
3022 for (i = 0; i < vsi->num_queue_pairs; i++) {
3023 rx_ring = vsi->rx_rings[i];
3024 tx_ring = vsi->tx_rings[i];
3025 rx_ring->dcb_tc = 0;
3026 tx_ring->dcb_tc = 0;
3027 }
3028 }
3029
3030 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3031 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3032 continue;
3033
3034 qoffset = vsi->tc_config.tc_info[n].qoffset;
3035 qcount = vsi->tc_config.tc_info[n].qcount;
3036 for (i = qoffset; i < (qoffset + qcount); i++) {
3037 rx_ring = vsi->rx_rings[i];
3038 tx_ring = vsi->tx_rings[i];
3039 rx_ring->dcb_tc = n;
3040 tx_ring->dcb_tc = n;
3041 }
3042 }
3043 }
3044
3045 /**
3046 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3047 * @vsi: ptr to the VSI
3048 **/
3049 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3050 {
3051 if (vsi->netdev)
3052 i40e_set_rx_mode(vsi->netdev);
3053 }
3054
3055 /**
3056 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3057 * @vsi: Pointer to the targeted VSI
3058 *
3059 * This function replays the hlist of saved SB Flow Director filters
3060 * onto the hw.
3061 **/
3062 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3063 {
3064 struct i40e_fdir_filter *filter;
3065 struct i40e_pf *pf = vsi->back;
3066 struct hlist_node *node;
3067
3068 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3069 return;
3070
3071 hlist_for_each_entry_safe(filter, node,
3072 &pf->fdir_filter_list, fdir_node) {
3073 i40e_add_del_fdir(vsi, filter, true);
3074 }
3075 }
3076
3077 /**
3078 * i40e_vsi_configure - Set up the VSI for action
3079 * @vsi: the VSI being configured
3080 **/
3081 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3082 {
3083 int err;
3084
3085 i40e_set_vsi_rx_mode(vsi);
3086 i40e_restore_vlan(vsi);
3087 i40e_vsi_config_dcb_rings(vsi);
3088 err = i40e_vsi_configure_tx(vsi);
3089 if (!err)
3090 err = i40e_vsi_configure_rx(vsi);
3091
3092 return err;
3093 }
3094
3095 /**
3096 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3097 * @vsi: the VSI being configured
3098 **/
3099 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3100 {
3101 struct i40e_pf *pf = vsi->back;
3102 struct i40e_hw *hw = &pf->hw;
3103 u16 vector;
3104 int i, q;
3105 u32 qp;
3106
3107 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3108 * and PFINT_LNKLSTn registers, e.g.:
3109 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3110 */
3111 qp = vsi->base_queue;
3112 vector = vsi->base_vector;
3113 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3114 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3115
3116 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3117 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3118 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3119 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3120 q_vector->rx.itr);
3121 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3122 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3123 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3124 q_vector->tx.itr);
3125 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3126 INTRL_USEC_TO_REG(vsi->int_rate_limit));
3127
3128 /* Linked list for the queuepairs assigned to this vector */
3129 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3130 for (q = 0; q < q_vector->num_ringpairs; q++) {
3131 u32 val;
3132
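			/* chain the causes: the Rx queue points at the Tx queue
			 * of the same pair, and the Tx queue points at the Rx
			 * queue of the next pair
			 */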
3133 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3134 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3135 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3136 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3137 (I40E_QUEUE_TYPE_TX
3138 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3139
3140 wr32(hw, I40E_QINT_RQCTL(qp), val);
3141
3142 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3143 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3144 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3145 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
3146 (I40E_QUEUE_TYPE_RX
3147 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3148
3149 /* Terminate the linked list */
3150 if (q == (q_vector->num_ringpairs - 1))
3151 val |= (I40E_QUEUE_END_OF_LIST
3152 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3153
3154 wr32(hw, I40E_QINT_TQCTL(qp), val);
3155 qp++;
3156 }
3157 }
3158
3159 i40e_flush(hw);
3160 }
3161
3162 /**
3163 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3164 * @pf: board private structure
3165 **/
3166 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3167 {
3168 struct i40e_hw *hw = &pf->hw;
3169 u32 val;
3170
3171 /* clear things first */
3172 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3173 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3174
3175 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3176 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3177 I40E_PFINT_ICR0_ENA_GRST_MASK |
3178 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3179 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3180 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3181 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3182 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3183
3184 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3185 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3186
3187 if (pf->flags & I40E_FLAG_PTP)
3188 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3189
3190 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3191
3192 /* SW_ITR_IDX = 0, but don't change INTENA */
3193 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3194 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3195
3196 /* OTHER_ITR_IDX = 0 */
3197 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3198 }
3199
3200 /**
3201 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3202 * @vsi: the VSI being configured
3203 **/
3204 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3205 {
3206 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3207 struct i40e_pf *pf = vsi->back;
3208 struct i40e_hw *hw = &pf->hw;
3209 u32 val;
3210
3211 /* set the ITR configuration */
3212 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3213 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3214 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3215 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3216 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3217 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3218 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3219
3220 i40e_enable_misc_int_causes(pf);
3221
3222 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3223 wr32(hw, I40E_PFINT_LNKLST0, 0);
3224
3225 /* Associate the queue pair to the vector and enable the queue int */
3226 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3227 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3228 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3229
3230 wr32(hw, I40E_QINT_RQCTL(0), val);
3231
3232 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3233 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3234 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3235
3236 wr32(hw, I40E_QINT_TQCTL(0), val);
3237 i40e_flush(hw);
3238 }
3239
3240 /**
3241 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3242 * @pf: board private structure
3243 **/
3244 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3245 {
3246 struct i40e_hw *hw = &pf->hw;
3247
3248 wr32(hw, I40E_PFINT_DYN_CTL0,
3249 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3250 i40e_flush(hw);
3251 }
3252
3253 /**
3254 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3255 * @pf: board private structure
3256 * @clearpba: true when all pending interrupt events should be cleared
3257 **/
3258 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
3259 {
3260 struct i40e_hw *hw = &pf->hw;
3261 u32 val;
3262
3263 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3264 (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
3265 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3266
3267 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3268 i40e_flush(hw);
3269 }
3270
3271 /**
3272 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3273 * @irq: interrupt number
3274 * @data: pointer to a q_vector
3275 **/
3276 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3277 {
3278 struct i40e_q_vector *q_vector = data;
3279
3280 if (!q_vector->tx.ring && !q_vector->rx.ring)
3281 return IRQ_HANDLED;
3282
3283 napi_schedule_irqoff(&q_vector->napi);
3284
3285 return IRQ_HANDLED;
3286 }
3287
3288 /**
3289 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3290 * @vsi: the VSI being configured
3291 * @basename: name for the vector
3292 *
3293 * Allocates MSI-X vectors and requests interrupts from the kernel.
3294 **/
3295 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3296 {
3297 int q_vectors = vsi->num_q_vectors;
3298 struct i40e_pf *pf = vsi->back;
3299 int base = vsi->base_vector;
3300 int rx_int_idx = 0;
3301 int tx_int_idx = 0;
3302 int vector, err;
3303
3304 for (vector = 0; vector < q_vectors; vector++) {
3305 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3306
3307 if (q_vector->tx.ring && q_vector->rx.ring) {
3308 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3309 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3310 tx_int_idx++;
3311 } else if (q_vector->rx.ring) {
3312 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3313 "%s-%s-%d", basename, "rx", rx_int_idx++);
3314 } else if (q_vector->tx.ring) {
3315 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3316 "%s-%s-%d", basename, "tx", tx_int_idx++);
3317 } else {
3318 /* skip this unused q_vector */
3319 continue;
3320 }
3321 err = request_irq(pf->msix_entries[base + vector].vector,
3322 vsi->irq_handler,
3323 0,
3324 q_vector->name,
3325 q_vector);
3326 if (err) {
3327 dev_info(&pf->pdev->dev,
3328 "MSIX request_irq failed, error: %d\n", err);
3329 goto free_queue_irqs;
3330 }
3331 /* assign the mask for this irq */
3332 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3333 &q_vector->affinity_mask);
3334 }
3335
3336 vsi->irqs_ready = true;
3337 return 0;
3338
3339 free_queue_irqs:
3340 while (vector) {
3341 vector--;
3342 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3343 NULL);
3344 free_irq(pf->msix_entries[base + vector].vector,
3345 &(vsi->q_vectors[vector]));
3346 }
3347 return err;
3348 }
3349
3350 /**
3351 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3352 * @vsi: the VSI being un-configured
3353 **/
3354 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3355 {
3356 struct i40e_pf *pf = vsi->back;
3357 struct i40e_hw *hw = &pf->hw;
3358 int base = vsi->base_vector;
3359 int i;
3360
3361 for (i = 0; i < vsi->num_queue_pairs; i++) {
3362 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3363 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3364 }
3365
3366 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
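		/* the DYN_CTLN registers are offset by one because vector 0
		 * (the misc/other causes vector) uses DYN_CTL0
		 */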
3367 for (i = vsi->base_vector;
3368 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3369 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3370
3371 i40e_flush(hw);
3372 for (i = 0; i < vsi->num_q_vectors; i++)
3373 synchronize_irq(pf->msix_entries[i + base].vector);
3374 } else {
3375 /* Legacy and MSI mode - this stops all interrupt handling */
3376 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3377 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3378 i40e_flush(hw);
3379 synchronize_irq(pf->pdev->irq);
3380 }
3381 }
3382
3383 /**
3384 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3385 * @vsi: the VSI being configured
3386 **/
3387 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3388 {
3389 struct i40e_pf *pf = vsi->back;
3390 int i;
3391
3392 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3393 for (i = 0; i < vsi->num_q_vectors; i++)
3394 i40e_irq_dynamic_enable(vsi, i);
3395 } else {
3396 i40e_irq_dynamic_enable_icr0(pf, true);
3397 }
3398
3399 i40e_flush(&pf->hw);
3400 return 0;
3401 }
3402
3403 /**
3404 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3405 * @pf: board private structure
3406 **/
3407 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3408 {
3409 /* Disable ICR 0 */
3410 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3411 i40e_flush(&pf->hw);
3412 }
3413
3414 /**
3415 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3416 * @irq: interrupt number
3417 * @data: pointer to the PF structure
3418 *
3419 * This is the handler used for all MSI/Legacy interrupts, and deals
3420 * with both queue and non-queue interrupts. This is also used in
3421 * MSIX mode to handle the non-queue interrupts.
3422 **/
3423 static irqreturn_t i40e_intr(int irq, void *data)
3424 {
3425 struct i40e_pf *pf = (struct i40e_pf *)data;
3426 struct i40e_hw *hw = &pf->hw;
3427 irqreturn_t ret = IRQ_NONE;
3428 u32 icr0, icr0_remaining;
3429 u32 val, ena_mask;
3430
3431 icr0 = rd32(hw, I40E_PFINT_ICR0);
3432 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3433
3434 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3435 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3436 goto enable_intr;
3437
3438 /* if interrupt but no bits showing, must be SWINT */
3439 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3440 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3441 pf->sw_int_count++;
3442
3443 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3444 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3445 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3446 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3447 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3448 }
3449
3450 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3451 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3452 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3453 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3454
3455 /* We do not have a way to disarm Queue causes while leaving
3456 * the interrupt enabled for all other causes; ideally the
3457 * interrupt should be disabled while we are in NAPI, but
3458 * this is not a performance path and napi_schedule()
3459 * can deal with rescheduling.
3460 */
3461 if (!test_bit(__I40E_DOWN, &pf->state))
3462 napi_schedule_irqoff(&q_vector->napi);
3463 }
3464
3465 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3466 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3467 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3468 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3469 }
3470
3471 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3472 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3473 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3474 }
3475
3476 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3477 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3478 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3479 }
3480
3481 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3482 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3483 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3484 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3485 val = rd32(hw, I40E_GLGEN_RSTAT);
3486 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3487 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3488 if (val == I40E_RESET_CORER) {
3489 pf->corer_count++;
3490 } else if (val == I40E_RESET_GLOBR) {
3491 pf->globr_count++;
3492 } else if (val == I40E_RESET_EMPR) {
3493 pf->empr_count++;
3494 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3495 }
3496 }
3497
3498 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3499 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3500 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3501 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3502 rd32(hw, I40E_PFHMC_ERRORINFO),
3503 rd32(hw, I40E_PFHMC_ERRORDATA));
3504 }
3505
3506 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3507 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3508
3509 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3510 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3511 i40e_ptp_tx_hwtstamp(pf);
3512 }
3513 }
3514
3515 /* If a critical error is pending we have no choice but to reset the
3516 * device.
3517 * Report and mask out any remaining unexpected interrupts.
3518 */
3519 icr0_remaining = icr0 & ena_mask;
3520 if (icr0_remaining) {
3521 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3522 icr0_remaining);
3523 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3524 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3525 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3526 dev_info(&pf->pdev->dev, "device will be reset\n");
3527 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3528 i40e_service_event_schedule(pf);
3529 }
3530 ena_mask &= ~icr0_remaining;
3531 }
3532 ret = IRQ_HANDLED;
3533
3534 enable_intr:
3535 /* re-enable interrupt causes */
3536 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3537 if (!test_bit(__I40E_DOWN, &pf->state)) {
3538 i40e_service_event_schedule(pf);
3539 i40e_irq_dynamic_enable_icr0(pf, false);
3540 }
3541
3542 return ret;
3543 }
3544
3545 /**
3546 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3547 * @tx_ring: tx ring to clean
3548 * @budget: how many cleans we're allowed
3549 *
3550 * Returns true if there's any budget left (i.e. the clean is finished)
3551 **/
3552 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3553 {
3554 struct i40e_vsi *vsi = tx_ring->vsi;
3555 u16 i = tx_ring->next_to_clean;
3556 struct i40e_tx_buffer *tx_buf;
3557 struct i40e_tx_desc *tx_desc;
3558
3559 tx_buf = &tx_ring->tx_bi[i];
3560 tx_desc = I40E_TX_DESC(tx_ring, i);
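	/* track the index as a negative offset from the ring end so the
	 * wrap check below reduces to a simple test for zero
	 */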
3561 i -= tx_ring->count;
3562
3563 do {
3564 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3565
3566 /* if next_to_watch is not set then there is no work pending */
3567 if (!eop_desc)
3568 break;
3569
3570 /* prevent any other reads prior to eop_desc */
3571 read_barrier_depends();
3572
3573 /* if the descriptor isn't done, no work yet to do */
3574 if (!(eop_desc->cmd_type_offset_bsz &
3575 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3576 break;
3577
3578 /* clear next_to_watch to prevent false hangs */
3579 tx_buf->next_to_watch = NULL;
3580
3581 tx_desc->buffer_addr = 0;
3582 tx_desc->cmd_type_offset_bsz = 0;
3583 /* move past filter desc */
3584 tx_buf++;
3585 tx_desc++;
3586 i++;
3587 if (unlikely(!i)) {
3588 i -= tx_ring->count;
3589 tx_buf = tx_ring->tx_bi;
3590 tx_desc = I40E_TX_DESC(tx_ring, 0);
3591 }
3592 /* unmap skb header data */
3593 dma_unmap_single(tx_ring->dev,
3594 dma_unmap_addr(tx_buf, dma),
3595 dma_unmap_len(tx_buf, len),
3596 DMA_TO_DEVICE);
3597 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3598 kfree(tx_buf->raw_buf);
3599
3600 tx_buf->raw_buf = NULL;
3601 tx_buf->tx_flags = 0;
3602 tx_buf->next_to_watch = NULL;
3603 dma_unmap_len_set(tx_buf, len, 0);
3604 tx_desc->buffer_addr = 0;
3605 tx_desc->cmd_type_offset_bsz = 0;
3606
3607 /* move us past the eop_desc for start of next FD desc */
3608 tx_buf++;
3609 tx_desc++;
3610 i++;
3611 if (unlikely(!i)) {
3612 i -= tx_ring->count;
3613 tx_buf = tx_ring->tx_bi;
3614 tx_desc = I40E_TX_DESC(tx_ring, 0);
3615 }
3616
3617 /* update budget accounting */
3618 budget--;
3619 } while (likely(budget));
3620
3621 i += tx_ring->count;
3622 tx_ring->next_to_clean = i;
3623
3624 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
3625 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3626
3627 return budget > 0;
3628 }
3629
3630 /**
3631 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3632 * @irq: interrupt number
3633 * @data: pointer to a q_vector
3634 **/
3635 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3636 {
3637 struct i40e_q_vector *q_vector = data;
3638 struct i40e_vsi *vsi;
3639
3640 if (!q_vector->tx.ring)
3641 return IRQ_HANDLED;
3642
3643 vsi = q_vector->tx.ring->vsi;
3644 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3645
3646 return IRQ_HANDLED;
3647 }
3648
3649 /**
3650 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3651 * @vsi: the VSI being configured
3652 * @v_idx: vector index
3653 * @qp_idx: queue pair index
3654 **/
3655 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3656 {
3657 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3658 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3659 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3660
3661 tx_ring->q_vector = q_vector;
3662 tx_ring->next = q_vector->tx.ring;
3663 q_vector->tx.ring = tx_ring;
3664 q_vector->tx.count++;
3665
3666 rx_ring->q_vector = q_vector;
3667 rx_ring->next = q_vector->rx.ring;
3668 q_vector->rx.ring = rx_ring;
3669 q_vector->rx.count++;
3670 }
3671
3672 /**
3673 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3674 * @vsi: the VSI being configured
3675 *
3676 * This function maps descriptor rings to the queue-specific vectors
3677 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3678 * one vector per queue pair, but on a constrained vector budget, we
3679 * group the queue pairs as "efficiently" as possible.
3680 **/
3681 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3682 {
3683 int qp_remaining = vsi->num_queue_pairs;
3684 int q_vectors = vsi->num_q_vectors;
3685 int num_ringpairs;
3686 int v_start = 0;
3687 int qp_idx = 0;
3688
3689 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3690 * group them so there are multiple queues per vector.
3691 * It is also important to go through all the available vectors to make
3692 * sure that any vectors we don't end up using are cleared. This is
3693 * especially important when decreasing the number of queues
3694 * in use.
3695 */
3696 for (; v_start < q_vectors; v_start++) {
3697 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3698
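/* spread the remaining queue pairs as evenly as possible across the
 * remaining vectors
 */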
3699 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3700
3701 q_vector->num_ringpairs = num_ringpairs;
3702
3703 q_vector->rx.count = 0;
3704 q_vector->tx.count = 0;
3705 q_vector->rx.ring = NULL;
3706 q_vector->tx.ring = NULL;
3707
3708 while (num_ringpairs--) {
3709 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3710 qp_idx++;
3711 qp_remaining--;
3712 }
3713 }
3714 }
3715
3716 /**
3717 * i40e_vsi_request_irq - Request IRQ from the OS
3718 * @vsi: the VSI being configured
3719 * @basename: name for the vector
3720 **/
3721 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3722 {
3723 struct i40e_pf *pf = vsi->back;
3724 int err;
3725
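/* MSI-X uses per-vector handlers requested in the MSI-X path; plain MSI
 * gets an exclusive line, while legacy INTx may be shared with other
 * devices and therefore needs IRQF_SHARED
 */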
3726 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3727 err = i40e_vsi_request_irq_msix(vsi, basename);
3728 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3729 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3730 pf->int_name, pf);
3731 else
3732 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3733 pf->int_name, pf);
3734
3735 if (err)
3736 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3737
3738 return err;
3739 }
3740
3741 #ifdef CONFIG_NET_POLL_CONTROLLER
3742 /**
3743 * i40e_netpoll - A Polling 'interrupt' handler
3744 * @netdev: network interface device structure
3745 *
3746 * This is used by netconsole to send skbs without having to re-enable
3747 * interrupts. It's not called while the normal interrupt routine is executing.
3748 **/
3749 #ifdef I40E_FCOE
3750 void i40e_netpoll(struct net_device *netdev)
3751 #else
3752 static void i40e_netpoll(struct net_device *netdev)
3753 #endif
3754 {
3755 struct i40e_netdev_priv *np = netdev_priv(netdev);
3756 struct i40e_vsi *vsi = np->vsi;
3757 struct i40e_pf *pf = vsi->back;
3758 int i;
3759
3760 /* if interface is down do nothing */
3761 if (test_bit(__I40E_DOWN, &vsi->state))
3762 return;
3763
3764 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3765 for (i = 0; i < vsi->num_q_vectors; i++)
3766 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3767 } else {
3768 i40e_intr(pf->pdev->irq, netdev);
3769 }
3770 }
3771 #endif
3772
3773 /**
3774 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3775 * @pf: the PF being configured
3776 * @pf_q: the PF queue
3777 * @enable: enable or disable state of the queue
3778 *
3779 * This routine will wait for the given Tx queue of the PF to reach the
3780 * enabled or disabled state.
3781 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
3782 * multiple retries; otherwise returns 0 on success.
3783 **/
3784 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3785 {
3786 int i;
3787 u32 tx_reg;
3788
3789 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3790 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3791 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3792 break;
3793
3794 usleep_range(10, 20);
3795 }
3796 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3797 return -ETIMEDOUT;
3798
3799 return 0;
3800 }
3801
3802 /**
3803 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3804 * @vsi: the VSI being configured
3805 * @enable: start or stop the Tx rings
3806 **/
3807 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3808 {
3809 struct i40e_pf *pf = vsi->back;
3810 struct i40e_hw *hw = &pf->hw;
3811 int i, j, pf_q, ret = 0;
3812 u32 tx_reg;
3813
3814 pf_q = vsi->base_queue;
3815 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3816
3817 /* warn the TX unit of coming changes */
3818 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3819 if (!enable)
3820 usleep_range(10, 20);
3821
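/* wait for any previous enable/disable request (QENA_REQ) to be
 * reflected in QENA_STAT before changing the queue state
 */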
3822 for (j = 0; j < 50; j++) {
3823 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3824 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3825 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3826 break;
3827 usleep_range(1000, 2000);
3828 }
3829 /* Skip if the queue is already in the requested state */
3830 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3831 continue;
3832
3833 /* turn on/off the queue */
3834 if (enable) {
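/* zero the HW head pointer before requesting the enable */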
3835 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3836 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3837 } else {
3838 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3839 }
3840
3841 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3842 /* No waiting for the Tx queue to disable */
3843 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3844 continue;
3845
3846 /* wait for the change to finish */
3847 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3848 if (ret) {
3849 dev_info(&pf->pdev->dev,
3850 "VSI seid %d Tx ring %d %sable timeout\n",
3851 vsi->seid, pf_q, (enable ? "en" : "dis"));
3852 break;
3853 }
3854 }
3855
3856 if (hw->revision_id == 0)
3857 mdelay(50);
3858 return ret;
3859 }
3860
3861 /**
3862 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3863 * @pf: the PF being configured
3864 * @pf_q: the PF queue
3865 * @enable: enable or disable state of the queue
3866 *
3867 * This routine will wait for the given Rx queue of the PF to reach the
3868 * enabled or disabled state.
3869 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
3870 * multiple retries; otherwise returns 0 on success.
3871 **/
3872 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3873 {
3874 int i;
3875 u32 rx_reg;
3876
3877 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3878 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3879 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3880 break;
3881
3882 usleep_range(10, 20);
3883 }
3884 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3885 return -ETIMEDOUT;
3886
3887 return 0;
3888 }
3889
3890 /**
3891 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3892 * @vsi: the VSI being configured
3893 * @enable: start or stop the Rx rings
3894 **/
3895 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3896 {
3897 struct i40e_pf *pf = vsi->back;
3898 struct i40e_hw *hw = &pf->hw;
3899 int i, j, pf_q, ret = 0;
3900 u32 rx_reg;
3901
3902 pf_q = vsi->base_queue;
3903 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3904 for (j = 0; j < 50; j++) {
3905 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3906 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3907 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3908 break;
3909 usleep_range(1000, 2000);
3910 }
3911
3912 /* Skip if the queue is already in the requested state */
3913 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3914 continue;
3915
3916 /* turn on/off the queue */
3917 if (enable)
3918 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3919 else
3920 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3921 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3922 /* No waiting for the Rx queue to disable */
3923 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3924 continue;
3925
3926 /* wait for the change to finish */
3927 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3928 if (ret) {
3929 dev_info(&pf->pdev->dev,
3930 "VSI seid %d Rx ring %d %sable timeout\n",
3931 vsi->seid, pf_q, (enable ? "en" : "dis"));
3932 break;
3933 }
3934 }
3935
3936 return ret;
3937 }
3938
3939 /**
3940 * i40e_vsi_control_rings - Start or stop a VSI's rings
3941 * @vsi: the VSI being configured
3942 * @request: true to start the rings, false to stop them
3943 **/
3944 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3945 {
3946 int ret = 0;
3947
3948 /* do rx first for enable and last for disable */
3949 if (request) {
3950 ret = i40e_vsi_control_rx(vsi, request);
3951 if (ret)
3952 return ret;
3953 ret = i40e_vsi_control_tx(vsi, request);
3954 } else {
3955 /* Ignore return value, we need to shutdown whatever we can */
3956 i40e_vsi_control_tx(vsi, request);
3957 i40e_vsi_control_rx(vsi, request);
3958 }
3959
3960 return ret;
3961 }
3962
3963 /**
3964 * i40e_vsi_free_irq - Free the irq association with the OS
3965 * @vsi: the VSI being configured
3966 **/
3967 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3968 {
3969 struct i40e_pf *pf = vsi->back;
3970 struct i40e_hw *hw = &pf->hw;
3971 int base = vsi->base_vector;
3972 u32 val, qp;
3973 int i;
3974
3975 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3976 if (!vsi->q_vectors)
3977 return;
3978
3979 if (!vsi->irqs_ready)
3980 return;
3981
3982 vsi->irqs_ready = false;
3983 for (i = 0; i < vsi->num_q_vectors; i++) {
3984 u16 vector = i + base;
3985
3986 /* free only the irqs that were actually requested */
3987 if (!vsi->q_vectors[i] ||
3988 !vsi->q_vectors[i]->num_ringpairs)
3989 continue;
3990
3991 /* clear the affinity_mask in the IRQ descriptor */
3992 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3993 NULL);
3994 free_irq(pf->msix_entries[vector].vector,
3995 vsi->q_vectors[i]);
3996
3997 /* Tear down the interrupt queue link list
3998 *
3999 * We know that they come in pairs and always
4000 * the Rx first, then the Tx. To clear the
4001 * link list, stick the EOL value into the
4002 * next_q field of the registers.
4003 */
4004 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4005 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4006 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4007 val |= I40E_QUEUE_END_OF_LIST
4008 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4009 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4010
4011 while (qp != I40E_QUEUE_END_OF_LIST) {
4012 u32 next;
4013
4014 val = rd32(hw, I40E_QINT_RQCTL(qp));
4015
4016 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4017 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4018 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4019 I40E_QINT_RQCTL_INTEVENT_MASK);
4020
4021 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4022 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4023
4024 wr32(hw, I40E_QINT_RQCTL(qp), val);
4025
4026 val = rd32(hw, I40E_QINT_TQCTL(qp));
4027
4028 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4029 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4030
4031 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4032 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4033 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4034 I40E_QINT_TQCTL_INTEVENT_MASK);
4035
4036 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4037 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4038
4039 wr32(hw, I40E_QINT_TQCTL(qp), val);
4040 qp = next;
4041 }
4042 }
4043 } else {
4044 free_irq(pf->pdev->irq, pf);
4045
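/* Legacy/MSI mode uses a single interrupt for everything, so tear down
 * the one queue pair linked off PFINT_LNKLST0
 */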
4046 val = rd32(hw, I40E_PFINT_LNKLST0);
4047 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4048 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4049 val |= I40E_QUEUE_END_OF_LIST
4050 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4051 wr32(hw, I40E_PFINT_LNKLST0, val);
4052
4053 val = rd32(hw, I40E_QINT_RQCTL(qp));
4054 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4055 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4056 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4057 I40E_QINT_RQCTL_INTEVENT_MASK);
4058
4059 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4060 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4061
4062 wr32(hw, I40E_QINT_RQCTL(qp), val);
4063
4064 val = rd32(hw, I40E_QINT_TQCTL(qp));
4065
4066 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4067 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4068 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4069 I40E_QINT_TQCTL_INTEVENT_MASK);
4070
4071 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4072 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4073
4074 wr32(hw, I40E_QINT_TQCTL(qp), val);
4075 }
4076 }
4077
4078 /**
4079 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4080 * @vsi: the VSI being configured
4081 * @v_idx: Index of vector to be freed
4082 *
4083 * This function frees the memory allocated to the q_vector. In addition if
4084 * NAPI is enabled it will delete any references to the NAPI struct prior
4085 * to freeing the q_vector.
4086 **/
4087 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4088 {
4089 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4090 struct i40e_ring *ring;
4091
4092 if (!q_vector)
4093 return;
4094
4095 /* disassociate q_vector from rings */
4096 i40e_for_each_ring(ring, q_vector->tx)
4097 ring->q_vector = NULL;
4098
4099 i40e_for_each_ring(ring, q_vector->rx)
4100 ring->q_vector = NULL;
4101
4102 /* only VSI w/ an associated netdev is set up w/ NAPI */
4103 if (vsi->netdev)
4104 netif_napi_del(&q_vector->napi);
4105
4106 vsi->q_vectors[v_idx] = NULL;
4107
4108 kfree_rcu(q_vector, rcu);
4109 }
4110
4111 /**
4112 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4113 * @vsi: the VSI being un-configured
4114 *
4115 * This frees the memory allocated to the q_vectors and
4116 * deletes references to the NAPI struct.
4117 **/
4118 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4119 {
4120 int v_idx;
4121
4122 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4123 i40e_free_q_vector(vsi, v_idx);
4124 }
4125
4126 /**
4127 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4128 * @pf: board private structure
4129 **/
4130 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4131 {
4132 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4133 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4134 pci_disable_msix(pf->pdev);
4135 kfree(pf->msix_entries);
4136 pf->msix_entries = NULL;
4137 kfree(pf->irq_pile);
4138 pf->irq_pile = NULL;
4139 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4140 pci_disable_msi(pf->pdev);
4141 }
4142 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4143 }
4144
4145 /**
4146 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4147 * @pf: board private structure
4148 *
4149 * We go through and clear interrupt-specific resources and reset the
4150 * structure to pre-load conditions.
4151 **/
4152 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4153 {
4154 int i;
4155
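/* vector 0 carries the "other causes"/admin queue interrupt requested by
 * the misc vector setup; quiesce and release it before freeing the
 * per-queue vectors
 */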
4156 i40e_stop_misc_vector(pf);
4157 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
4158 synchronize_irq(pf->msix_entries[0].vector);
4159 free_irq(pf->msix_entries[0].vector, pf);
4160 }
4161
4162 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4163 I40E_IWARP_IRQ_PILE_ID);
4164
4165 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4166 for (i = 0; i < pf->num_alloc_vsi; i++)
4167 if (pf->vsi[i])
4168 i40e_vsi_free_q_vectors(pf->vsi[i]);
4169 i40e_reset_interrupt_capability(pf);
4170 }
4171
4172 /**
4173 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4174 * @vsi: the VSI being configured
4175 **/
4176 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4177 {
4178 int q_idx;
4179
4180 if (!vsi->netdev)
4181 return;
4182
4183 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4184 napi_enable(&vsi->q_vectors[q_idx]->napi);
4185 }
4186
4187 /**
4188 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4189 * @vsi: the VSI being configured
4190 **/
4191 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4192 {
4193 int q_idx;
4194
4195 if (!vsi->netdev)
4196 return;
4197
4198 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4199 napi_disable(&vsi->q_vectors[q_idx]->napi);
4200 }
4201
4202 /**
4203 * i40e_vsi_close - Shut down a VSI
4204 * @vsi: the vsi to be quelled
4205 **/
4206 static void i40e_vsi_close(struct i40e_vsi *vsi)
4207 {
4208 bool reset = false;
4209
4210 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4211 i40e_down(vsi);
4212 i40e_vsi_free_irq(vsi);
4213 i40e_vsi_free_tx_resources(vsi);
4214 i40e_vsi_free_rx_resources(vsi);
4215 vsi->current_netdev_flags = 0;
4216 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4217 reset = true;
4218 i40e_notify_client_of_netdev_close(vsi, reset);
4219 }
4220
4221 /**
4222 * i40e_quiesce_vsi - Pause a given VSI
4223 * @vsi: the VSI being paused
4224 **/
4225 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4226 {
4227 if (test_bit(__I40E_DOWN, &vsi->state))
4228 return;
4229
4230 /* No need to disable FCoE VSI when Tx suspended */
4231 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4232 vsi->type == I40E_VSI_FCOE) {
4233 dev_dbg(&vsi->back->pdev->dev,
4234 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4235 return;
4236 }
4237
4238 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
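/* let the stack close a running netdev via ndo_stop; VSIs without a
 * running netdev are shut down directly
 */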
4239 if (vsi->netdev && netif_running(vsi->netdev))
4240 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4241 else
4242 i40e_vsi_close(vsi);
4243 }
4244
4245 /**
4246 * i40e_unquiesce_vsi - Resume a given VSI
4247 * @vsi: the VSI being resumed
4248 **/
4249 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4250 {
4251 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4252 return;
4253
4254 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4255 if (vsi->netdev && netif_running(vsi->netdev))
4256 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4257 else
4258 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4259 }
4260
4261 /**
4262 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4263 * @pf: the PF
4264 **/
4265 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4266 {
4267 int v;
4268
4269 for (v = 0; v < pf->num_alloc_vsi; v++) {
4270 if (pf->vsi[v])
4271 i40e_quiesce_vsi(pf->vsi[v]);
4272 }
4273 }
4274
4275 /**
4276 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4277 * @pf: the PF
4278 **/
4279 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4280 {
4281 int v;
4282
4283 for (v = 0; v < pf->num_alloc_vsi; v++) {
4284 if (pf->vsi[v])
4285 i40e_unquiesce_vsi(pf->vsi[v]);
4286 }
4287 }
4288
4289 #ifdef CONFIG_I40E_DCB
4290 /**
4291 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4292 * @vsi: the VSI being configured
4293 *
4294 * This function waits for the given VSI's queues to be disabled.
4295 **/
4296 static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4297 {
4298 struct i40e_pf *pf = vsi->back;
4299 int i, pf_q, ret;
4300
4301 pf_q = vsi->base_queue;
4302 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4303 /* Check and wait for the disable status of the queue */
4304 ret = i40e_pf_txq_wait(pf, pf_q, false);
4305 if (ret) {
4306 dev_info(&pf->pdev->dev,
4307 "VSI seid %d Tx ring %d disable timeout\n",
4308 vsi->seid, pf_q);
4309 return ret;
4310 }
4311 }
4312
4313 pf_q = vsi->base_queue;
4314 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4315 /* Check and wait for the disable status of the queue */
4316 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4317 if (ret) {
4318 dev_info(&pf->pdev->dev,
4319 "VSI seid %d Rx ring %d disable timeout\n",
4320 vsi->seid, pf_q);
4321 return ret;
4322 }
4323 }
4324
4325 return 0;
4326 }
4327
4328 /**
4329 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4330 * @pf: the PF
4331 *
4332 * This function waits for the queues to be in disabled state for all the
4333 * VSIs that are managed by this PF.
4334 **/
4335 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4336 {
4337 int v, ret = 0;
4338
4339 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4340 /* No need to wait for FCoE VSI queues */
4341 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4342 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4343 if (ret)
4344 break;
4345 }
4346 }
4347
4348 return ret;
4349 }
4350
4351 #endif
4352
4353 /**
4354 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4355 * @q_idx: TX queue number
4356 * @vsi: Pointer to VSI struct
4357 *
4358 * This function checks the specified queue of the given VSI for a hung
4359 * condition. Detection is a two-step process: this call sets the 'hung' bit
4360 * for the q_vector; if napi_poll runs before the next service task, that bit
4361 * is cleared again. If the bit is still set on the subsequent run, this
4362 * function issues a SW interrupt to recover from the hung condition.
4363 **/
4364 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4365 {
4366 struct i40e_ring *tx_ring = NULL;
4367 struct i40e_pf *pf;
4368 u32 head, val, tx_pending_hw;
4369 int i;
4370
4371 pf = vsi->back;
4372
4373 /* now that we have an index, find the tx_ring struct */
4374 for (i = 0; i < vsi->num_queue_pairs; i++) {
4375 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4376 if (q_idx == vsi->tx_rings[i]->queue_index) {
4377 tx_ring = vsi->tx_rings[i];
4378 break;
4379 }
4380 }
4381 }
4382
4383 if (!tx_ring)
4384 return;
4385
4386 /* Read interrupt register */
4387 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4388 val = rd32(&pf->hw,
4389 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4390 tx_ring->vsi->base_vector - 1));
4391 else
4392 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4393
4394 head = i40e_get_head(tx_ring);
4395
4396 tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
4397
4398 /* HW has finished executing descriptors and updated the HEAD
4399 * write-back, but SW hasn't processed those descriptors yet. If no
4400 * interrupt is generated from this point on, dev_watchdog may detect
4401 * a timeout on those netdev_queues, so proactively trigger a SW
4402 * interrupt here.
4403 */
4404 if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4405 /* NAPI Poll didn't run and clear since it was set */
4406 if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
4407 &tx_ring->q_vector->hung_detected)) {
4408 netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
4409 vsi->seid, q_idx, tx_pending_hw,
4410 tx_ring->next_to_clean, head,
4411 tx_ring->next_to_use,
4412 readl(tx_ring->tail));
4413 netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
4414 vsi->seid, q_idx, val);
4415 i40e_force_wb(vsi, tx_ring->q_vector);
4416 } else {
4417 /* First Chance - detected possible hung */
4418 set_bit(I40E_Q_VECTOR_HUNG_DETECT,
4419 &tx_ring->q_vector->hung_detected);
4420 }
4421 }
4422
4423 /* This is the case where we have interrupts missing,
4424 * so the tx_pending in HW will most likely be 0, but we
4425 * will have tx_pending in SW since the WB happened but the
4426 * interrupt got lost.
4427 */
4428 if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
4429 (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
4430 if (napi_reschedule(&tx_ring->q_vector->napi))
4431 tx_ring->tx_stats.tx_lost_interrupt++;
4432 }
4433 }
4434
4435 /**
4436 * i40e_detect_recover_hung - Function to detect and recover hung_queues
4437 * @pf: pointer to PF struct
4438 *
4439 * The LAN VSI has a netdev and the netdev has TX queues. This function
4440 * checks each of those TX queues for a hung condition and triggers
4441 * recovery by issuing a SW interrupt.
4442 **/
4443 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4444 {
4445 struct net_device *netdev;
4446 struct i40e_vsi *vsi;
4447 int i;
4448
4449 /* Only for LAN VSI */
4450 vsi = pf->vsi[pf->lan_vsi];
4451
4452 if (!vsi)
4453 return;
4454
4455 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4456 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4457 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4458 return;
4459
4460 /* Make sure type is MAIN VSI */
4461 if (vsi->type != I40E_VSI_MAIN)
4462 return;
4463
4464 netdev = vsi->netdev;
4465 if (!netdev)
4466 return;
4467
4468 /* Bail out if netif_carrier is not OK */
4469 if (!netif_carrier_ok(netdev))
4470 return;
4471
4472 /* Go thru' TX queues for netdev */
4473 for (i = 0; i < netdev->num_tx_queues; i++) {
4474 struct netdev_queue *q;
4475
4476 q = netdev_get_tx_queue(netdev, i);
4477 if (q)
4478 i40e_detect_recover_hung_queue(i, vsi);
4479 }
4480 }
4481
4482 /**
4483 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4484 * @pf: pointer to PF
4485 *
4486 * Get the TC map for an iSCSI PF type; it will include the iSCSI TC
4487 * and the LAN TC.
4488 **/
4489 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4490 {
4491 struct i40e_dcb_app_priority_table app;
4492 struct i40e_hw *hw = &pf->hw;
4493 u8 enabled_tc = 1; /* TC0 is always enabled */
4494 u8 tc, i;
4495 /* Get the iSCSI APP TLV */
4496 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4497
4498 for (i = 0; i < dcbcfg->numapps; i++) {
4499 app = dcbcfg->app[i];
4500 if (app.selector == I40E_APP_SEL_TCPIP &&
4501 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4502 tc = dcbcfg->etscfg.prioritytable[app.priority];
4503 enabled_tc |= BIT(tc);
4504 break;
4505 }
4506 }
4507
4508 return enabled_tc;
4509 }
4510
4511 /**
4512 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4513 * @dcbcfg: the corresponding DCBx configuration structure
4514 *
4515 * Return the number of TCs from given DCBx configuration
4516 **/
4517 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4518 {
4519 u8 num_tc = 0;
4520 int i;
4521
4522 /* Scan the ETS Config Priority Table to find
4523 * traffic class enabled for a given priority
4524 * and use the traffic class index to get the
4525 * number of traffic classes enabled
4526 */
4527 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4528 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4529 num_tc = dcbcfg->etscfg.prioritytable[i];
4530 }
4531
4532 /* Traffic class index starts from zero so
4533 * increment to return the actual count
4534 */
4535 return num_tc + 1;
4536 }
4537
4538 /**
4539 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4540 * @dcbcfg: the corresponding DCBx configuration structure
4541 *
4542 * Query the given DCBX configuration and return a bitmap of the
4543 * traffic classes that are enabled.
4544 **/
4545 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4546 {
4547 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4548 u8 enabled_tc = 1;
4549 u8 i;
4550
4551 for (i = 0; i < num_tc; i++)
4552 enabled_tc |= BIT(i);
4553
4554 return enabled_tc;
4555 }
4556
4557 /**
4558 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4559 * @pf: PF being queried
4560 *
4561 * Return number of traffic classes enabled for the given PF
4562 **/
4563 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4564 {
4565 struct i40e_hw *hw = &pf->hw;
4566 u8 i, enabled_tc;
4567 u8 num_tc = 0;
4568 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4569
4570 /* If DCB is not enabled then always in single TC */
4571 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4572 return 1;
4573
4574 /* SFP mode will be enabled for all TCs on port */
4575 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4576 return i40e_dcb_get_num_tc(dcbcfg);
4577
4578 /* MFP mode return count of enabled TCs for this PF */
4579 if (pf->hw.func_caps.iscsi)
4580 enabled_tc = i40e_get_iscsi_tc_map(pf);
4581 else
4582 return 1; /* Only TC0 */
4583
4584 /* At least have TC0 */
4585 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4586 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4587 if (enabled_tc & BIT(i))
4588 num_tc++;
4589 }
4590 return num_tc;
4591 }
4592
4593 /**
4594 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4595 * @pf: PF being queried
4596 *
4597 * Return a bitmap for first enabled traffic class for this PF.
4598 **/
4599 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4600 {
4601 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4602 u8 i = 0;
4603
4604 if (!enabled_tc)
4605 return 0x1; /* TC0 */
4606
4607 /* Find the first enabled TC */
4608 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4609 if (enabled_tc & BIT(i))
4610 break;
4611 }
4612
4613 return BIT(i);
4614 }
4615
4616 /**
4617 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4618 * @pf: PF being queried
4619 *
4620 * Return a bitmap for enabled traffic classes for this PF.
4621 **/
4622 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4623 {
4624 /* If DCB is not enabled for this PF then just return default TC */
4625 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4626 return i40e_pf_get_default_tc(pf);
4627
4628 /* SFP mode we want PF to be enabled for all TCs */
4629 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4630 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4631
4632 /* MFP enabled and iSCSI PF type */
4633 if (pf->hw.func_caps.iscsi)
4634 return i40e_get_iscsi_tc_map(pf);
4635 else
4636 return i40e_pf_get_default_tc(pf);
4637 }
4638
4639 /**
4640 * i40e_vsi_get_bw_info - Query VSI BW Information
4641 * @vsi: the VSI being queried
4642 *
4643 * Returns 0 on success, negative value on failure
4644 **/
4645 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4646 {
4647 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4648 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4649 struct i40e_pf *pf = vsi->back;
4650 struct i40e_hw *hw = &pf->hw;
4651 i40e_status ret;
4652 u32 tc_bw_max;
4653 int i;
4654
4655 /* Get the VSI level BW configuration */
4656 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4657 if (ret) {
4658 dev_info(&pf->pdev->dev,
4659 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4660 i40e_stat_str(&pf->hw, ret),
4661 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4662 return -EINVAL;
4663 }
4664
4665 /* Get the VSI level BW configuration per TC */
4666 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4667 NULL);
4668 if (ret) {
4669 dev_info(&pf->pdev->dev,
4670 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4671 i40e_stat_str(&pf->hw, ret),
4672 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4673 return -EINVAL;
4674 }
4675
4676 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4677 dev_info(&pf->pdev->dev,
4678 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4679 bw_config.tc_valid_bits,
4680 bw_ets_config.tc_valid_bits);
4681 /* Still continuing */
4682 }
4683
4684 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4685 vsi->bw_max_quanta = bw_config.max_bw;
4686 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4687 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4688 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4689 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4690 vsi->bw_ets_limit_credits[i] =
4691 le16_to_cpu(bw_ets_config.credits[i]);
4692 /* 3 bits out of 4 for each TC */
4693 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4694 }
4695
4696 return 0;
4697 }
4698
4699 /**
4700 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4701 * @vsi: the VSI being configured
4702 * @enabled_tc: TC bitmap
4703 * @bw_credits: BW shared credits per TC
4704 *
4705 * Returns 0 on success, negative value on failure
4706 **/
4707 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4708 u8 *bw_share)
4709 {
4710 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4711 i40e_status ret;
4712 int i;
4713
4714 bw_data.tc_valid_bits = enabled_tc;
4715 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4716 bw_data.tc_bw_credits[i] = bw_share[i];
4717
4718 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4719 NULL);
4720 if (ret) {
4721 dev_info(&vsi->back->pdev->dev,
4722 "AQ command Config VSI BW allocation per TC failed = %d\n",
4723 vsi->back->hw.aq.asq_last_status);
4724 return -EINVAL;
4725 }
4726
4727 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4728 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4729
4730 return 0;
4731 }
4732
4733 /**
4734 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4735 * @vsi: the VSI being configured
4736 * @enabled_tc: TC map to be enabled
4737 *
4738 **/
4739 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4740 {
4741 struct net_device *netdev = vsi->netdev;
4742 struct i40e_pf *pf = vsi->back;
4743 struct i40e_hw *hw = &pf->hw;
4744 u8 netdev_tc = 0;
4745 int i;
4746 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4747
4748 if (!netdev)
4749 return;
4750
4751 if (!enabled_tc) {
4752 netdev_reset_tc(netdev);
4753 return;
4754 }
4755
4756 /* Set up actual enabled TCs on the VSI */
4757 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4758 return;
4759
4760 /* set per TC queues for the VSI */
4761 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4762 /* Only set TC queues for enabled tcs
4763 *
4764 * e.g. For a VSI that has TC0 and TC3 enabled, the
4765 * enabled_tc bitmap would be 0x9 (binary 1001); the
4766 * driver will set numtc for the netdev to 2, and the
4767 * netdev layer will reference them as TC 0 and 1.
4768 */
4769 if (vsi->tc_config.enabled_tc & BIT(i))
4770 netdev_set_tc_queue(netdev,
4771 vsi->tc_config.tc_info[i].netdev_tc,
4772 vsi->tc_config.tc_info[i].qcount,
4773 vsi->tc_config.tc_info[i].qoffset);
4774 }
4775
4776 /* Assign UP2TC map for the VSI */
4777 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4778 /* Get the actual TC# for the UP */
4779 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4780 /* Get the mapped netdev TC# for the UP */
4781 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4782 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4783 }
4784 }
4785
4786 /**
4787 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4788 * @vsi: the VSI being configured
4789 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4790 **/
4791 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4792 struct i40e_vsi_context *ctxt)
4793 {
4794 /* copy just the sections touched not the entire info
4795 * since not all sections are valid as returned by
4796 * update vsi params
4797 */
4798 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4799 memcpy(&vsi->info.queue_mapping,
4800 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4801 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4802 sizeof(vsi->info.tc_mapping));
4803 }
4804
4805 /**
4806 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4807 * @vsi: VSI to be configured
4808 * @enabled_tc: TC bitmap
4809 *
4810 * This configures a particular VSI for TCs that are mapped to the
4811 * given TC bitmap. It uses default bandwidth share for TCs across
4812 * VSIs to configure TC for a particular VSI.
4813 *
4814 * NOTE:
4815 * It is expected that the VSI queues have been quiesced before calling
4816 * this function.
4817 **/
4818 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4819 {
4820 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4821 struct i40e_vsi_context ctxt;
4822 int ret = 0;
4823 int i;
4824
4825 /* Check if enabled_tc is same as existing or new TCs */
4826 if (vsi->tc_config.enabled_tc == enabled_tc)
4827 return ret;
4828
4829 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4830 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4831 if (enabled_tc & BIT(i))
4832 bw_share[i] = 1;
4833 }
4834
4835 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4836 if (ret) {
4837 dev_info(&vsi->back->pdev->dev,
4838 "Failed configuring TC map %d for VSI %d\n",
4839 enabled_tc, vsi->seid);
4840 goto out;
4841 }
4842
4843 /* Update Queue Pairs Mapping for currently enabled UPs */
4844 ctxt.seid = vsi->seid;
4845 ctxt.pf_num = vsi->back->hw.pf_id;
4846 ctxt.vf_num = 0;
4847 ctxt.uplink_seid = vsi->uplink_seid;
4848 ctxt.info = vsi->info;
4849 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4850
4851 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
4852 ctxt.info.valid_sections |=
4853 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
4854 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
4855 }
4856
4857 /* Update the VSI after updating the VSI queue-mapping information */
4858 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4859 if (ret) {
4860 dev_info(&vsi->back->pdev->dev,
4861 "Update vsi tc config failed, err %s aq_err %s\n",
4862 i40e_stat_str(&vsi->back->hw, ret),
4863 i40e_aq_str(&vsi->back->hw,
4864 vsi->back->hw.aq.asq_last_status));
4865 goto out;
4866 }
4867 /* update the local VSI info with updated queue map */
4868 i40e_vsi_update_queue_map(vsi, &ctxt);
4869 vsi->info.valid_sections = 0;
4870
4871 /* Update current VSI BW information */
4872 ret = i40e_vsi_get_bw_info(vsi);
4873 if (ret) {
4874 dev_info(&vsi->back->pdev->dev,
4875 "Failed updating vsi bw info, err %s aq_err %s\n",
4876 i40e_stat_str(&vsi->back->hw, ret),
4877 i40e_aq_str(&vsi->back->hw,
4878 vsi->back->hw.aq.asq_last_status));
4879 goto out;
4880 }
4881
4882 /* Update the netdev TC setup */
4883 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4884 out:
4885 return ret;
4886 }
4887
4888 /**
4889 * i40e_veb_config_tc - Configure TCs for given VEB
4890 * @veb: given VEB
4891 * @enabled_tc: TC bitmap
4892 *
4893 * Configures given TC bitmap for VEB (switching) element
4894 **/
4895 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4896 {
4897 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4898 struct i40e_pf *pf = veb->pf;
4899 int ret = 0;
4900 int i;
4901
4902 /* No TCs or already enabled TCs just return */
4903 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4904 return ret;
4905
4906 bw_data.tc_valid_bits = enabled_tc;
4907 /* bw_data.absolute_credits is not set (relative) */
4908
4909 /* Enable ETS TCs with equal BW Share for now */
4910 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4911 if (enabled_tc & BIT(i))
4912 bw_data.tc_bw_share_credits[i] = 1;
4913 }
4914
4915 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4916 &bw_data, NULL);
4917 if (ret) {
4918 dev_info(&pf->pdev->dev,
4919 "VEB bw config failed, err %s aq_err %s\n",
4920 i40e_stat_str(&pf->hw, ret),
4921 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4922 goto out;
4923 }
4924
4925 /* Update the BW information */
4926 ret = i40e_veb_get_bw_info(veb);
4927 if (ret) {
4928 dev_info(&pf->pdev->dev,
4929 "Failed getting veb bw config, err %s aq_err %s\n",
4930 i40e_stat_str(&pf->hw, ret),
4931 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4932 }
4933
4934 out:
4935 return ret;
4936 }
4937
4938 #ifdef CONFIG_I40E_DCB
4939 /**
4940 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4941 * @pf: PF struct
4942 *
4943 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4944 * the caller has quiesced all the VSIs before calling
4945 * this function.
4946 **/
4947 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4948 {
4949 u8 tc_map = 0;
4950 int ret;
4951 u8 v;
4952
4953 /* Enable the TCs available on PF to all VEBs */
4954 tc_map = i40e_pf_get_tc_map(pf);
4955 for (v = 0; v < I40E_MAX_VEB; v++) {
4956 if (!pf->veb[v])
4957 continue;
4958 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4959 if (ret) {
4960 dev_info(&pf->pdev->dev,
4961 "Failed configuring TC for VEB seid=%d\n",
4962 pf->veb[v]->seid);
4963 /* Will try to configure as many components as possible */
4964 }
4965 }
4966
4967 /* Update each VSI */
4968 for (v = 0; v < pf->num_alloc_vsi; v++) {
4969 if (!pf->vsi[v])
4970 continue;
4971
4972 /* - Enable all TCs for the LAN VSI
4973 #ifdef I40E_FCOE
4974 * - For FCoE VSI only enable the TC configured
4975 * as per the APP TLV
4976 #endif
4977 * - For all others keep them at TC0 for now
4978 */
4979 if (v == pf->lan_vsi)
4980 tc_map = i40e_pf_get_tc_map(pf);
4981 else
4982 tc_map = i40e_pf_get_default_tc(pf);
4983 #ifdef I40E_FCOE
4984 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4985 tc_map = i40e_get_fcoe_tc_map(pf);
4986 #endif /* #ifdef I40E_FCOE */
4987
4988 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4989 if (ret) {
4990 dev_info(&pf->pdev->dev,
4991 "Failed configuring TC for VSI seid=%d\n",
4992 pf->vsi[v]->seid);
4993 /* Will try to configure as many components as possible */
4994 } else {
4995 /* Re-configure VSI vectors based on updated TC map */
4996 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4997 if (pf->vsi[v]->netdev)
4998 i40e_dcbnl_set_all(pf->vsi[v]);
4999 }
5000 i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
5001 }
5002 }
5003
5004 /**
5005 * i40e_resume_port_tx - Resume port Tx
5006 * @pf: PF struct
5007 *
5008 * Resume a port's Tx and issue a PF reset in case of failure to
5009 * resume.
5010 **/
5011 static int i40e_resume_port_tx(struct i40e_pf *pf)
5012 {
5013 struct i40e_hw *hw = &pf->hw;
5014 int ret;
5015
5016 ret = i40e_aq_resume_port_tx(hw, NULL);
5017 if (ret) {
5018 dev_info(&pf->pdev->dev,
5019 "Resume Port Tx failed, err %s aq_err %s\n",
5020 i40e_stat_str(&pf->hw, ret),
5021 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5022 /* Schedule PF reset to recover */
5023 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5024 i40e_service_event_schedule(pf);
5025 }
5026
5027 return ret;
5028 }
5029
5030 /**
5031 * i40e_init_pf_dcb - Initialize DCB configuration
5032 * @pf: PF being configured
5033 *
5034 * Query the current DCB configuration and cache it
5035 * in the hardware structure
5036 **/
5037 static int i40e_init_pf_dcb(struct i40e_pf *pf)
5038 {
5039 struct i40e_hw *hw = &pf->hw;
5040 int err = 0;
5041
5042 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
5043 if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
5044 goto out;
5045
5046 /* Get the initial DCB configuration */
5047 err = i40e_init_dcb(hw);
5048 if (!err) {
5049 /* Device/Function is not DCBX capable */
5050 if ((!hw->func_caps.dcb) ||
5051 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
5052 dev_info(&pf->pdev->dev,
5053 "DCBX offload is not supported or is disabled for this PF.\n");
5054
5055 if (pf->flags & I40E_FLAG_MFP_ENABLED)
5056 goto out;
5057
5058 } else {
5059 /* When status is not DISABLED then DCBX is managed in FW */
5060 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
5061 DCB_CAP_DCBX_VER_IEEE;
5062
5063 pf->flags |= I40E_FLAG_DCB_CAPABLE;
5064 /* Enable DCB tagging only when more than one TC */
5065 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5066 pf->flags |= I40E_FLAG_DCB_ENABLED;
5067 dev_dbg(&pf->pdev->dev,
5068 "DCBX offload is supported for this PF.\n");
5069 }
5070 } else {
5071 dev_info(&pf->pdev->dev,
5072 "Query for DCB configuration failed, err %s aq_err %s\n",
5073 i40e_stat_str(&pf->hw, err),
5074 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5075 }
5076
5077 out:
5078 return err;
5079 }
5080 #endif /* CONFIG_I40E_DCB */
5081 #define SPEED_SIZE 14
5082 #define FC_SIZE 8
5083 /**
5084 * i40e_print_link_message - print link up or down
5085 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
5086 **/
5087 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5088 {
5089 char *speed = "Unknown";
5090 char *fc = "Unknown";
5091
5092 if (vsi->current_isup == isup)
5093 return;
5094 vsi->current_isup = isup;
5095 if (!isup) {
5096 netdev_info(vsi->netdev, "NIC Link is Down\n");
5097 return;
5098 }
5099
5100 /* Warn user if link speed on NPAR enabled partition is not at
5101 * least 10Gbps
5102 */
5103 if (vsi->back->hw.func_caps.npar_enable &&
5104 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5105 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5106 netdev_warn(vsi->netdev,
5107 "The partition detected link speed that is less than 10Gbps\n");
5108
5109 switch (vsi->back->hw.phy.link_info.link_speed) {
5110 case I40E_LINK_SPEED_40GB:
5111 speed = "40 G";
5112 break;
5113 case I40E_LINK_SPEED_20GB:
5114 speed = "20 G";
5115 break;
5116 case I40E_LINK_SPEED_10GB:
5117 speed = "10 G";
5118 break;
5119 case I40E_LINK_SPEED_1GB:
5120 speed = "1000 M";
5121 break;
5122 case I40E_LINK_SPEED_100MB:
5123 speed = "100 M";
5124 break;
5125 default:
5126 break;
5127 }
5128
5129 switch (vsi->back->hw.fc.current_mode) {
5130 case I40E_FC_FULL:
5131 fc = "RX/TX";
5132 break;
5133 case I40E_FC_TX_PAUSE:
5134 fc = "TX";
5135 break;
5136 case I40E_FC_RX_PAUSE:
5137 fc = "RX";
5138 break;
5139 default:
5140 fc = "None";
5141 break;
5142 }
5143
5144 netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5145 speed, fc);
5146 }
5147
5148 /**
5149 * i40e_up_complete - Finish the last steps of bringing up a connection
5150 * @vsi: the VSI being configured
5151 **/
5152 static int i40e_up_complete(struct i40e_vsi *vsi)
5153 {
5154 struct i40e_pf *pf = vsi->back;
5155 int err;
5156
5157 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5158 i40e_vsi_configure_msix(vsi);
5159 else
5160 i40e_configure_msi_and_legacy(vsi);
5161
5162 /* start rings */
5163 err = i40e_vsi_control_rings(vsi, true);
5164 if (err)
5165 return err;
5166
5167 clear_bit(__I40E_DOWN, &vsi->state);
5168 i40e_napi_enable_all(vsi);
5169 i40e_vsi_enable_irq(vsi);
5170
5171 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
5172 (vsi->netdev)) {
5173 i40e_print_link_message(vsi, true);
5174 netif_tx_start_all_queues(vsi->netdev);
5175 netif_carrier_on(vsi->netdev);
5176 } else if (vsi->netdev) {
5177 i40e_print_link_message(vsi, false);
5178 /* need to check for a qualified module here */
5179 if ((pf->hw.phy.link_info.link_info &
5180 I40E_AQ_MEDIA_AVAILABLE) &&
5181 (!(pf->hw.phy.link_info.an_info &
5182 I40E_AQ_QUALIFIED_MODULE)))
5183 netdev_err(vsi->netdev,
5184 "the driver failed to link because an unqualified module was detected.");
5185 }
5186
5187 /* replay FDIR SB filters */
5188 if (vsi->type == I40E_VSI_FDIR) {
5189 /* reset fd counters */
5190 pf->fd_add_err = pf->fd_atr_cnt = 0;
5191 if (pf->fd_tcp_rule > 0) {
5192 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5193 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5194 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
5195 pf->fd_tcp_rule = 0;
5196 }
5197 i40e_fdir_filter_restore(vsi);
5198 }
5199
5200 /* On the next run of the service_task, notify any clients of the
5201 * newly opened netdev
5202 */
5203 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
5204 i40e_service_event_schedule(pf);
5205
5206 return 0;
5207 }
5208
5209 /**
5210 * i40e_vsi_reinit_locked - Reset the VSI
5211 * @vsi: the VSI being configured
5212 *
5213 * Rebuild the ring structs after some configuration
5214 * has changed, e.g. MTU size.
5215 **/
5216 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5217 {
5218 struct i40e_pf *pf = vsi->back;
5219
5220 WARN_ON(in_interrupt());
5221 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
5222 usleep_range(1000, 2000);
5223 i40e_down(vsi);
5224
5225 /* Give a VF some time to respond to the reset. The
5226 * two second wait is based upon the watchdog cycle in
5227 * the VF driver.
5228 */
5229 if (vsi->type == I40E_VSI_SRIOV)
5230 msleep(2000);
5231 i40e_up(vsi);
5232 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5233 }
5234
5235 /**
5236 * i40e_up - Bring the connection back up after being down
5237 * @vsi: the VSI being configured
5238 **/
5239 int i40e_up(struct i40e_vsi *vsi)
5240 {
5241 int err;
5242
5243 err = i40e_vsi_configure(vsi);
5244 if (!err)
5245 err = i40e_up_complete(vsi);
5246
5247 return err;
5248 }
5249
5250 /**
5251 * i40e_down - Shutdown the connection processing
5252 * @vsi: the VSI being stopped
5253 **/
5254 void i40e_down(struct i40e_vsi *vsi)
5255 {
5256 int i;
5257
5258 /* It is assumed that the caller of this function
5259 * sets the vsi->state __I40E_DOWN bit.
5260 */
5261 if (vsi->netdev) {
5262 netif_carrier_off(vsi->netdev);
5263 netif_tx_disable(vsi->netdev);
5264 }
5265 i40e_vsi_disable_irq(vsi);
5266 i40e_vsi_control_rings(vsi, false);
5267 i40e_napi_disable_all(vsi);
5268
5269 for (i = 0; i < vsi->num_queue_pairs; i++) {
5270 i40e_clean_tx_ring(vsi->tx_rings[i]);
5271 i40e_clean_rx_ring(vsi->rx_rings[i]);
5272 }
5273 }
5274
5275 /**
5276 * i40e_setup_tc - configure multiple traffic classes
5277 * @netdev: net device to configure
5278 * @tc: number of traffic classes to enable
5279 **/
5280 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5281 {
5282 struct i40e_netdev_priv *np = netdev_priv(netdev);
5283 struct i40e_vsi *vsi = np->vsi;
5284 struct i40e_pf *pf = vsi->back;
5285 u8 enabled_tc = 0;
5286 int ret = -EINVAL;
5287 int i;
5288
5289 /* Check if DCB enabled to continue */
5290 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5291 netdev_info(netdev, "DCB is not enabled for adapter\n");
5292 goto exit;
5293 }
5294
5295 /* Check if MFP enabled */
5296 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5297 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5298 goto exit;
5299 }
5300
5301 /* Check whether tc count is within enabled limit */
5302 if (tc > i40e_pf_get_num_tc(pf)) {
5303 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5304 goto exit;
5305 }
5306
5307 /* Generate TC map for number of tc requested */
5308 for (i = 0; i < tc; i++)
5309 enabled_tc |= BIT(i);
5310
5311 /* Requesting same TC configuration as already enabled */
5312 if (enabled_tc == vsi->tc_config.enabled_tc)
5313 return 0;
5314
5315 /* Quiesce VSI queues */
5316 i40e_quiesce_vsi(vsi);
5317
5318 /* Configure VSI for enabled TCs */
5319 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5320 if (ret) {
5321 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5322 vsi->seid);
5323 goto exit;
5324 }
5325
5326 /* Unquiesce VSI */
5327 i40e_unquiesce_vsi(vsi);
5328
5329 exit:
5330 return ret;
5331 }
5332
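/* A TC count is typically requested from user space through the mqprio
 * qdisc, which reaches i40e_setup_tc() via the ndo_setup_tc hook handled
 * by __i40e_setup_tc() below. An illustrative invocation (interface name
 * and queue layout are placeholders):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *      map 0 1 2 3 0 1 2 3 queues 2@0 2@2 2@4 2@6 hw 1
 */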
5333 #ifdef I40E_FCOE
5334 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5335 struct tc_to_netdev *tc)
5336 #else
5337 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
5338 struct tc_to_netdev *tc)
5339 #endif
5340 {
5341 if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
5342 return -EINVAL;
5343 return i40e_setup_tc(netdev, tc->tc);
5344 }
5345
5346 /**
5347 * i40e_open - Called when a network interface is made active
5348 * @netdev: network interface device structure
5349 *
5350 * The open entry point is called when a network interface is made
5351 * active by the system (IFF_UP). At this point all resources needed
5352 * for transmit and receive operations are allocated, the interrupt
5353 * handler is registered with the OS, the netdev watchdog subtask is
5354 * enabled, and the stack is notified that the interface is ready.
5355 *
5356 * Returns 0 on success, negative value on failure
5357 **/
5358 int i40e_open(struct net_device *netdev)
5359 {
5360 struct i40e_netdev_priv *np = netdev_priv(netdev);
5361 struct i40e_vsi *vsi = np->vsi;
5362 struct i40e_pf *pf = vsi->back;
5363 int err;
5364
5365 /* disallow open during test or if eeprom is broken */
5366 if (test_bit(__I40E_TESTING, &pf->state) ||
5367 test_bit(__I40E_BAD_EEPROM, &pf->state))
5368 return -EBUSY;
5369
5370 netif_carrier_off(netdev);
5371
5372 err = i40e_vsi_open(vsi);
5373 if (err)
5374 return err;
5375
5376 /* configure global TSO hardware offload settings */
5377 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5378 TCP_FLAG_FIN) >> 16);
5379 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5380 TCP_FLAG_FIN |
5381 TCP_FLAG_CWR) >> 16);
5382 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5383
5384 #ifdef CONFIG_I40E_VXLAN
5385 vxlan_get_rx_port(netdev);
5386 #endif
5387 #ifdef CONFIG_I40E_GENEVE
5388 if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
5389 geneve_get_rx_port(netdev);
5390 #endif
5391
5392 i40e_notify_client_of_netdev_open(vsi);
5393
5394 return 0;
5395 }
5396
5397 /**
5398 * i40e_vsi_open - Open the VSI and bring it up
5399 * @vsi: the VSI to open
5400 *
5401 * Finish initialization of the VSI.
5402 *
5403 * Returns 0 on success, negative value on failure
5404 **/
5405 int i40e_vsi_open(struct i40e_vsi *vsi)
5406 {
5407 struct i40e_pf *pf = vsi->back;
5408 char int_name[I40E_INT_NAME_STR_LEN];
5409 int err;
5410
5411 /* allocate descriptors */
5412 err = i40e_vsi_setup_tx_resources(vsi);
5413 if (err)
5414 goto err_setup_tx;
5415 err = i40e_vsi_setup_rx_resources(vsi);
5416 if (err)
5417 goto err_setup_rx;
5418
5419 err = i40e_vsi_configure(vsi);
5420 if (err)
5421 goto err_setup_rx;
5422
5423 if (vsi->netdev) {
5424 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5425 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5426 err = i40e_vsi_request_irq(vsi, int_name);
5427 if (err)
5428 goto err_setup_rx;
5429
5430 /* Notify the stack of the actual queue counts. */
5431 err = netif_set_real_num_tx_queues(vsi->netdev,
5432 vsi->num_queue_pairs);
5433 if (err)
5434 goto err_set_queues;
5435
5436 err = netif_set_real_num_rx_queues(vsi->netdev,
5437 vsi->num_queue_pairs);
5438 if (err)
5439 goto err_set_queues;
5440
5441 } else if (vsi->type == I40E_VSI_FDIR) {
5442 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5443 dev_driver_string(&pf->pdev->dev),
5444 dev_name(&pf->pdev->dev));
5445 err = i40e_vsi_request_irq(vsi, int_name);
5446
5447 } else {
5448 err = -EINVAL;
5449 goto err_setup_rx;
5450 }
5451
5452 err = i40e_up_complete(vsi);
5453 if (err)
5454 goto err_up_complete;
5455
5456 return 0;
5457
5458 err_up_complete:
5459 i40e_down(vsi);
5460 err_set_queues:
5461 i40e_vsi_free_irq(vsi);
5462 err_setup_rx:
5463 i40e_vsi_free_rx_resources(vsi);
5464 err_setup_tx:
5465 i40e_vsi_free_tx_resources(vsi);
5466 if (vsi == pf->vsi[pf->lan_vsi])
5467 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5468
5469 return err;
5470 }
5471
5472 /**
5473 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5474 * @pf: Pointer to PF
5475 *
5476 * This function destroys the hlist where all the Flow Director
5477 * filters were saved.
5478 **/
5479 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5480 {
5481 struct i40e_fdir_filter *filter;
5482 struct hlist_node *node2;
5483
5484 hlist_for_each_entry_safe(filter, node2,
5485 &pf->fdir_filter_list, fdir_node) {
5486 hlist_del(&filter->fdir_node);
5487 kfree(filter);
5488 }
5489 pf->fdir_pf_active_filters = 0;
5490 }
5491
5492 /**
5493 * i40e_close - Disables a network interface
5494 * @netdev: network interface device structure
5495 *
5496 * The close entry point is called when an interface is de-activated
5497 * by the OS. The hardware is still under the driver's control, but
5498 * this netdev interface is disabled.
5499 *
5500 * Returns 0, this is not allowed to fail
5501 **/
5502 int i40e_close(struct net_device *netdev)
5503 {
5504 struct i40e_netdev_priv *np = netdev_priv(netdev);
5505 struct i40e_vsi *vsi = np->vsi;
5506
5507 i40e_vsi_close(vsi);
5508
5509 return 0;
5510 }
5511
5512 /**
5513 * i40e_do_reset - Start a PF or Core Reset sequence
5514 * @pf: board private structure
5515 * @reset_flags: which reset is requested
5516 *
5517 * The essential difference in resets is that the PF Reset
5518 * doesn't clear the packet buffers, doesn't reset the PE
5519 * firmware, and doesn't bother the other PFs on the chip.
5520 **/
5521 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5522 {
5523 u32 val;
5524
5525 WARN_ON(in_interrupt());
5526
5528 /* do the biggest reset indicated */
5529 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5530
5531 /* Request a Global Reset
5532 *
5533 * This will start the chip's countdown to the actual full
5534 * chip reset event, and a warning interrupt to be sent
5535 * to all PFs, including the requestor. Our handler
5536 * for the warning interrupt will deal with the shutdown
5537 * and recovery of the switch setup.
5538 */
5539 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5540 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5541 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5542 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5543
5544 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5545
5546 /* Request a Core Reset
5547 *
5548 * Same as Global Reset, except does *not* include the MAC/PHY
5549 */
5550 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5551 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5552 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5553 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5554 i40e_flush(&pf->hw);
5555
5556 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5557
5558 /* Request a PF Reset
5559 *
5560 * Resets only the PF-specific registers
5561 *
5562 * This goes directly to the tear-down and rebuild of
5563 * the switch, since we need to do all the recovery as
5564 * for the Core Reset.
5565 */
5566 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5567 i40e_handle_reset_warning(pf);
5568
5569 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5570 int v;
5571
5572 /* Find the VSI(s) that requested a re-init */
5573 dev_info(&pf->pdev->dev,
5574 "VSI reinit requested\n");
5575 for (v = 0; v < pf->num_alloc_vsi; v++) {
5576 struct i40e_vsi *vsi = pf->vsi[v];
5577
5578 if (vsi != NULL &&
5579 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5580 i40e_vsi_reinit_locked(pf->vsi[v]);
5581 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5582 }
5583 }
5584 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5585 int v;
5586
5587 /* Find the VSI(s) that need to be brought down */
5588 dev_info(&pf->pdev->dev, "VSI down requested\n");
5589 for (v = 0; v < pf->num_alloc_vsi; v++) {
5590 struct i40e_vsi *vsi = pf->vsi[v];
5591
5592 if (vsi != NULL &&
5593 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5594 set_bit(__I40E_DOWN, &vsi->state);
5595 i40e_down(vsi);
5596 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5597 }
5598 }
5599 } else {
5600 dev_info(&pf->pdev->dev,
5601 "bad reset request 0x%08x\n", reset_flags);
5602 }
5603 }
5604
5605 #ifdef CONFIG_I40E_DCB
5606 /**
5607 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5608 * @pf: board private structure
5609 * @old_cfg: current DCB config
5610 * @new_cfg: new DCB config
5611 **/
5612 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5613 struct i40e_dcbx_config *old_cfg,
5614 struct i40e_dcbx_config *new_cfg)
5615 {
5616 bool need_reconfig = false;
5617
5618 /* Check if ETS configuration has changed */
5619 if (memcmp(&new_cfg->etscfg,
5620 &old_cfg->etscfg,
5621 sizeof(new_cfg->etscfg))) {
5622 /* If Priority Table has changed reconfig is needed */
5623 if (memcmp(&new_cfg->etscfg.prioritytable,
5624 &old_cfg->etscfg.prioritytable,
5625 sizeof(new_cfg->etscfg.prioritytable))) {
5626 need_reconfig = true;
5627 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5628 }
5629
5630 if (memcmp(&new_cfg->etscfg.tcbwtable,
5631 &old_cfg->etscfg.tcbwtable,
5632 sizeof(new_cfg->etscfg.tcbwtable)))
5633 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5634
5635 if (memcmp(&new_cfg->etscfg.tsatable,
5636 &old_cfg->etscfg.tsatable,
5637 sizeof(new_cfg->etscfg.tsatable)))
5638 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5639 }
5640
5641 /* Check if PFC configuration has changed */
5642 if (memcmp(&new_cfg->pfc,
5643 &old_cfg->pfc,
5644 sizeof(new_cfg->pfc))) {
5645 need_reconfig = true;
5646 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5647 }
5648
5649 /* Check if APP Table has changed */
5650 if (memcmp(&new_cfg->app,
5651 &old_cfg->app,
5652 sizeof(new_cfg->app))) {
5653 need_reconfig = true;
5654 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5655 }
5656
5657 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5658 return need_reconfig;
5659 }
5660
5661 /**
5662 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5663 * @pf: board private structure
5664 * @e: event info posted on ARQ
5665 **/
5666 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5667 struct i40e_arq_event_info *e)
5668 {
5669 struct i40e_aqc_lldp_get_mib *mib =
5670 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5671 struct i40e_hw *hw = &pf->hw;
5672 struct i40e_dcbx_config tmp_dcbx_cfg;
5673 bool need_reconfig = false;
5674 int ret = 0;
5675 u8 type;
5676
5677 /* Not DCB capable or capability disabled */
5678 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5679 return ret;
5680
5681 /* Ignore if event is not for Nearest Bridge */
5682 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5683 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5684 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
5685 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5686 return ret;
5687
5688 /* Check MIB Type and return if event for Remote MIB update */
5689 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5690 dev_dbg(&pf->pdev->dev,
5691 "LLDP event mib type %s\n", type ? "remote" : "local");
5692 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5693 /* Update the remote cached instance and return */
5694 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5695 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5696 &hw->remote_dcbx_config);
5697 goto exit;
5698 }
5699
5700 /* Store the old configuration */
5701 tmp_dcbx_cfg = hw->local_dcbx_config;
5702
5703 /* Reset the old DCBx configuration data */
5704 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5705 /* Get updated DCBX data from firmware */
5706 ret = i40e_get_dcb_config(&pf->hw);
5707 if (ret) {
5708 dev_info(&pf->pdev->dev,
5709 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5710 i40e_stat_str(&pf->hw, ret),
5711 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5712 goto exit;
5713 }
5714
5715 /* No change detected in DCBX configs */
5716 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5717 sizeof(tmp_dcbx_cfg))) {
5718 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5719 goto exit;
5720 }
5721
5722 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5723 &hw->local_dcbx_config);
5724
5725 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5726
5727 if (!need_reconfig)
5728 goto exit;
5729
5730 /* Enable DCB tagging only when more than one TC */
5731 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5732 pf->flags |= I40E_FLAG_DCB_ENABLED;
5733 else
5734 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5735
5736 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5737 /* Reconfiguration needed, quiesce all VSIs */
5738 i40e_pf_quiesce_all_vsi(pf);
5739
5740 /* Apply the configuration changes to the VEB/VSI */
5741 i40e_dcb_reconfigure(pf);
5742
5743 ret = i40e_resume_port_tx(pf);
5744
5745 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5746 /* In case of error no point in resuming VSIs */
5747 if (ret)
5748 goto exit;
5749
5750 /* Wait for the PF's queues to be disabled */
5751 ret = i40e_pf_wait_queues_disabled(pf);
5752 if (ret) {
5753 /* Schedule PF reset to recover */
5754 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5755 i40e_service_event_schedule(pf);
5756 } else {
5757 i40e_pf_unquiesce_all_vsi(pf);
5758 }
5759
5760 exit:
5761 return ret;
5762 }
5763 #endif /* CONFIG_I40E_DCB */
5764
5765 /**
5766 * i40e_do_reset_safe - Protected reset path for userland calls.
5767 * @pf: board private structure
5768 * @reset_flags: which reset is requested
5769 *
5770 **/
5771 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5772 {
5773 rtnl_lock();
5774 i40e_do_reset(pf, reset_flags);
5775 rtnl_unlock();
5776 }
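
/* Illustrative usage (an assumption, not a list of actual callers): a
 * process-context path that needs a PF reset could request it with e.g.
 *
 *	i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 *
 * so the reset runs under the rtnl lock, serialized against netdev ops.
 */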
5777
5778 /**
5779 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5780 * @pf: board private structure
5781 * @e: event info posted on ARQ
5782 *
5783 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5784 * and VF queues
5785 **/
5786 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5787 struct i40e_arq_event_info *e)
5788 {
5789 struct i40e_aqc_lan_overflow *data =
5790 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5791 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5792 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5793 struct i40e_hw *hw = &pf->hw;
5794 struct i40e_vf *vf;
5795 u16 vf_id;
5796
5797 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5798 queue, qtx_ctl);
5799
5800 /* Queue belongs to VF, find the VF and issue VF reset */
5801 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5802 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5803 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5804 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
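/* the VFVM index is an absolute VF number; convert it to this PF's
 * zero-based index into pf->vf[]
 */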
5805 vf_id -= hw->func_caps.vf_base_id;
5806 vf = &pf->vf[vf_id];
5807 i40e_vc_notify_vf_reset(vf);
5808 /* Allow VF to process pending reset notification */
5809 msleep(20);
5810 i40e_reset_vf(vf, false);
5811 }
5812 }
5813
5814 /**
5815 * i40e_service_event_complete - Finish up the service event
5816 * @pf: board private structure
5817 **/
5818 static void i40e_service_event_complete(struct i40e_pf *pf)
5819 {
5820 WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5821
5822 /* flush memory to make sure state is correct before next watchdog */
5823 smp_mb__before_atomic();
5824 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5825 }
5826
5827 /**
5828 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5829 * @pf: board private structure
5830 **/
5831 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5832 {
5833 u32 val, fcnt_prog;
5834
5835 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5836 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5837 return fcnt_prog;
5838 }
5839
5840 /**
5841 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5842 * @pf: board private structure
5843 **/
5844 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5845 {
5846 u32 val, fcnt_prog;
5847
5848 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
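/* total = guaranteed filters plus best-effort filters, both counts are
 * packed into the PFQF_FDSTAT register
 */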
5849 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5850 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5851 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5852 return fcnt_prog;
5853 }
5854
5855 /**
5856 * i40e_get_global_fd_count - Get total FD filters programmed on device
5857 * @pf: board private structure
5858 **/
5859 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5860 {
5861 u32 val, fcnt_prog;
5862
5863 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5864 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5865 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5866 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5867 return fcnt_prog;
5868 }
5869
5870 /**
5871 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5872 * @pf: board private structure
5873 **/
5874 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5875 {
5876 struct i40e_fdir_filter *filter;
5877 u32 fcnt_prog, fcnt_avail;
5878 struct hlist_node *node;
5879
5880 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5881 return;
5882
5883 /* Check if FD SB or ATR was auto disabled and if there is enough room
5884 * to re-enable
5885 */
5886 fcnt_prog = i40e_get_global_fd_count(pf);
5887 fcnt_avail = pf->fdir_pf_filter_count;
5888 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5889 (pf->fd_add_err == 0) ||
5890 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5891 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5892 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5893 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5894 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5895 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5896 }
5897 }
5898 /* Wait for some more space to be available to turn on ATR */
5899 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5900 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5901 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5902 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5903 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5904 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5905 }
5906 }
5907
5908 /* if hw had a problem adding a filter, delete it */
5909 if (pf->fd_inv > 0) {
5910 hlist_for_each_entry_safe(filter, node,
5911 &pf->fdir_filter_list, fdir_node) {
5912 if (filter->fd_id == pf->fd_inv) {
5913 hlist_del(&filter->fdir_node);
5914 kfree(filter);
5915 pf->fdir_pf_active_filters--;
5916 }
5917 }
5918 }
5919 }
5920
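/* both intervals below are in seconds; they are multiplied by HZ before
 * being compared against jiffies
 */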
5921 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5922 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5923 /**
5924 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5925 * @pf: board private structure
5926 **/
5927 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5928 {
5929 unsigned long min_flush_time;
5930 int flush_wait_retry = 50;
5931 bool disable_atr = false;
5932 int fd_room;
5933 int reg;
5934
5935 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5936 return;
5937
5938 if (!time_after(jiffies, pf->fd_flush_timestamp +
5939 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
5940 return;
5941
5942 /* If the flush is happening too quickly and we have mostly SB rules, we
5943 * should not re-enable ATR for some time.
5944 */
5945 min_flush_time = pf->fd_flush_timestamp +
5946 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5947 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5948
5949 if (!(time_after(jiffies, min_flush_time)) &&
5950 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5951 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5952 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5953 disable_atr = true;
5954 }
5955
5956 pf->fd_flush_timestamp = jiffies;
5957 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5958 /* flush all filters */
5959 wr32(&pf->hw, I40E_PFQF_CTL_1,
5960 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5961 i40e_flush(&pf->hw);
5962 pf->fd_flush_cnt++;
5963 pf->fd_add_err = 0;
5964 do {
5965 /* Check FD flush status every 5-6msec */
5966 usleep_range(5000, 6000);
5967 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5968 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5969 break;
5970 } while (flush_wait_retry--);
5971 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5972 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5973 } else {
5974 /* replay sideband filters */
5975 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5976 if (!disable_atr)
5977 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5978 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5979 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5980 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5981 }
5982
5983 }
5984
5985 /**
5986 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5987 * @pf: board private structure
5988 **/
5989 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5990 {
5991 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5992 }
5993
5994 /* We can see up to 256 filter programming descriptors in transit if the
5995 * filters are being applied really fast, before we see the first filter
5996 * miss error on Rx queue 0. Accumulating enough error messages before
5997 * reacting will make sure we don't cause a flush too often.
5998 */
5999 #define I40E_MAX_FD_PROGRAM_ERROR 256
6000
6001 /**
6002 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
6003 * @pf: board private structure
6004 **/
6005 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6006 {
6007
6008 /* if interface is down do nothing */
6009 if (test_bit(__I40E_DOWN, &pf->state))
6010 return;
6011
6012 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
6013 return;
6014
6015 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
6016 i40e_fdir_flush_and_replay(pf);
6017
6018 i40e_fdir_check_and_reenable(pf);
6019
6020 }
6021
6022 /**
6023 * i40e_vsi_link_event - notify VSI of a link event
6024 * @vsi: vsi to be notified
6025 * @link_up: link up or down
6026 **/
6027 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6028 {
6029 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6030 return;
6031
6032 switch (vsi->type) {
6033 case I40E_VSI_MAIN:
6034 #ifdef I40E_FCOE
6035 case I40E_VSI_FCOE:
6036 #endif
6037 if (!vsi->netdev || !vsi->netdev_registered)
6038 break;
6039
6040 if (link_up) {
6041 netif_carrier_on(vsi->netdev);
6042 netif_tx_wake_all_queues(vsi->netdev);
6043 } else {
6044 netif_carrier_off(vsi->netdev);
6045 netif_tx_stop_all_queues(vsi->netdev);
6046 }
6047 break;
6048
6049 case I40E_VSI_SRIOV:
6050 case I40E_VSI_VMDQ2:
6051 case I40E_VSI_CTRL:
6052 case I40E_VSI_IWARP:
6053 case I40E_VSI_MIRROR:
6054 default:
6055 /* there is no notification for other VSIs */
6056 break;
6057 }
6058 }
6059
6060 /**
6061 * i40e_veb_link_event - notify elements on the veb of a link event
6062 * @veb: veb to be notified
6063 * @link_up: link up or down
6064 **/
6065 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6066 {
6067 struct i40e_pf *pf;
6068 int i;
6069
6070 if (!veb || !veb->pf)
6071 return;
6072 pf = veb->pf;
6073
6074 /* depth first... */
6075 for (i = 0; i < I40E_MAX_VEB; i++)
6076 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6077 i40e_veb_link_event(pf->veb[i], link_up);
6078
6079 /* ... now the local VSIs */
6080 for (i = 0; i < pf->num_alloc_vsi; i++)
6081 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6082 i40e_vsi_link_event(pf->vsi[i], link_up);
6083 }
6084
6085 /**
6086 * i40e_link_event - Update netif_carrier status
6087 * @pf: board private structure
6088 **/
6089 static void i40e_link_event(struct i40e_pf *pf)
6090 {
6091 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6092 u8 new_link_speed, old_link_speed;
6093 i40e_status status;
6094 bool new_link, old_link;
6095
6096 /* save off old link status information */
6097 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
6098
6099 /* set this to force the get_link_status call to refresh state */
6100 pf->hw.phy.get_link_info = true;
6101
6102 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
6103
6104 status = i40e_get_link_status(&pf->hw, &new_link);
6105 if (status) {
6106 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
6107 status);
6108 return;
6109 }
6110
6111 old_link_speed = pf->hw.phy.link_info_old.link_speed;
6112 new_link_speed = pf->hw.phy.link_info.link_speed;
6113
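/* nothing to do if link state and speed are unchanged and either the VSI
 * is down or the netdev carrier already matches the new state
 */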
6114 if (new_link == old_link &&
6115 new_link_speed == old_link_speed &&
6116 (test_bit(__I40E_DOWN, &vsi->state) ||
6117 new_link == netif_carrier_ok(vsi->netdev)))
6118 return;
6119
6120 if (!test_bit(__I40E_DOWN, &vsi->state))
6121 i40e_print_link_message(vsi, new_link);
6122
6123 /* Notify the base of the switch tree connected to
6124 * the link. Floating VEBs are not notified.
6125 */
6126 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6127 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
6128 else
6129 i40e_vsi_link_event(vsi, new_link);
6130
6131 if (pf->vf)
6132 i40e_vc_notify_link_state(pf);
6133
6134 if (pf->flags & I40E_FLAG_PTP)
6135 i40e_ptp_set_increment(pf);
6136 }
6137
6138 /**
6139 * i40e_watchdog_subtask - periodic checks not using event driven response
6140 * @pf: board private structure
6141 **/
6142 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6143 {
6144 int i;
6145
6146 /* if interface is down do nothing */
6147 if (test_bit(__I40E_DOWN, &pf->state) ||
6148 test_bit(__I40E_CONFIG_BUSY, &pf->state))
6149 return;
6150
6151 /* make sure we don't do these things too often */
6152 if (time_before(jiffies, (pf->service_timer_previous +
6153 pf->service_timer_period)))
6154 return;
6155 pf->service_timer_previous = jiffies;
6156
6157 if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6158 i40e_link_event(pf);
6159
6160 /* Update the stats for active netdevs so the network stack
6161 * can look at updated numbers whenever it cares to
6162 */
6163 for (i = 0; i < pf->num_alloc_vsi; i++)
6164 if (pf->vsi[i] && pf->vsi[i]->netdev)
6165 i40e_update_stats(pf->vsi[i]);
6166
6167 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6168 /* Update the stats for the active switching components */
6169 for (i = 0; i < I40E_MAX_VEB; i++)
6170 if (pf->veb[i])
6171 i40e_update_veb_stats(pf->veb[i]);
6172 }
6173
6174 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6175 }
6176
6177 /**
6178 * i40e_reset_subtask - Set up for resetting the device and driver
6179 * @pf: board private structure
6180 **/
6181 static void i40e_reset_subtask(struct i40e_pf *pf)
6182 {
6183 u32 reset_flags = 0;
6184
6185 rtnl_lock();
6186 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
6187 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
6188 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
6189 }
6190 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
6191 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
6192 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6193 }
6194 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
6195 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
6196 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
6197 }
6198 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
6199 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6200 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
6201 }
6202 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
6203 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6204 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
6205 }
6206
6207 /* If there's a recovery already waiting, it takes
6208 * precedence over starting a new reset sequence.
6209 */
6210 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
6211 i40e_handle_reset_warning(pf);
6212 goto unlock;
6213 }
6214
6215 /* If we're already down or resetting, just bail */
6216 if (reset_flags &&
6217 !test_bit(__I40E_DOWN, &pf->state) &&
6218 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
6219 i40e_do_reset(pf, reset_flags);
6220
6221 unlock:
6222 rtnl_unlock();
6223 }
6224
6225 /**
6226 * i40e_handle_link_event - Handle link event
6227 * @pf: board private structure
6228 * @e: event info posted on ARQ
6229 **/
6230 static void i40e_handle_link_event(struct i40e_pf *pf,
6231 struct i40e_arq_event_info *e)
6232 {
6233 struct i40e_aqc_get_link_status *status =
6234 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6235
6236 /* Do a new status request to re-enable LSE reporting
6237 * and load new status information into the hw struct.
6238 * This completely ignores any state information
6239 * in the ARQ event info, instead choosing to always
6240 * issue the AQ update link status command.
6241 */
6242 i40e_link_event(pf);
6243
6244 /* check for unqualified module, if link is down */
6245 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6246 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6247 (!(status->link_info & I40E_AQ_LINK_UP)))
6248 dev_err(&pf->pdev->dev,
6249 "The driver failed to link because an unqualified module was detected.\n");
6250 }
6251
6252 /**
6253 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6254 * @pf: board private structure
6255 **/
6256 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6257 {
6258 struct i40e_arq_event_info event;
6259 struct i40e_hw *hw = &pf->hw;
6260 u16 pending, i = 0;
6261 i40e_status ret;
6262 u16 opcode;
6263 u32 oldval;
6264 u32 val;
6265
6266 /* Do not run clean AQ when PF reset fails */
6267 if (test_bit(__I40E_RESET_FAILED, &pf->state))
6268 return;
6269
6270 /* check for error indications */
6271 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6272 oldval = val;
6273 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6274 if (hw->debug_mask & I40E_DEBUG_AQ)
6275 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6276 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6277 }
6278 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6279 if (hw->debug_mask & I40E_DEBUG_AQ)
6280 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6281 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6282 pf->arq_overflows++;
6283 }
6284 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6285 if (hw->debug_mask & I40E_DEBUG_AQ)
6286 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6287 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6288 }
6289 if (oldval != val)
6290 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6291
6292 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6293 oldval = val;
6294 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6295 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6296 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6297 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6298 }
6299 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6300 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6301 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6302 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6303 }
6304 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6305 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
6306 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6307 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6308 }
6309 if (oldval != val)
6310 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6311
6312 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6313 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6314 if (!event.msg_buf)
6315 return;
6316
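/* process ARQ events until the ring is empty or the work limit is hit */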
6317 do {
6318 ret = i40e_clean_arq_element(hw, &event, &pending);
6319 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6320 break;
6321 else if (ret) {
6322 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6323 break;
6324 }
6325
6326 opcode = le16_to_cpu(event.desc.opcode);
6327 switch (opcode) {
6328
6329 case i40e_aqc_opc_get_link_status:
6330 i40e_handle_link_event(pf, &event);
6331 break;
6332 case i40e_aqc_opc_send_msg_to_pf:
6333 ret = i40e_vc_process_vf_msg(pf,
6334 le16_to_cpu(event.desc.retval),
6335 le32_to_cpu(event.desc.cookie_high),
6336 le32_to_cpu(event.desc.cookie_low),
6337 event.msg_buf,
6338 event.msg_len);
6339 break;
6340 case i40e_aqc_opc_lldp_update_mib:
6341 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6342 #ifdef CONFIG_I40E_DCB
6343 rtnl_lock();
6344 ret = i40e_handle_lldp_event(pf, &event);
6345 rtnl_unlock();
6346 #endif /* CONFIG_I40E_DCB */
6347 break;
6348 case i40e_aqc_opc_event_lan_overflow:
6349 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6350 i40e_handle_lan_overflow_event(pf, &event);
6351 break;
6352 case i40e_aqc_opc_send_msg_to_peer:
6353 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6354 break;
6355 case i40e_aqc_opc_nvm_erase:
6356 case i40e_aqc_opc_nvm_update:
6357 case i40e_aqc_opc_oem_post_update:
6358 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
6359 "ARQ NVM operation 0x%04x completed\n",
6360 opcode);
6361 break;
6362 default:
6363 dev_info(&pf->pdev->dev,
6364 "ARQ: Unknown event 0x%04x ignored\n",
6365 opcode);
6366 break;
6367 }
6368 } while (pending && (i++ < pf->adminq_work_limit));
6369
6370 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6371 /* re-enable Admin queue interrupt cause */
6372 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6373 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6374 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6375 i40e_flush(hw);
6376
6377 kfree(event.msg_buf);
6378 }
6379
6380 /**
6381 * i40e_verify_eeprom - make sure eeprom is good to use
6382 * @pf: board private structure
6383 **/
6384 static void i40e_verify_eeprom(struct i40e_pf *pf)
6385 {
6386 int err;
6387
6388 err = i40e_diag_eeprom_test(&pf->hw);
6389 if (err) {
6390 /* retry in case of garbage read */
6391 err = i40e_diag_eeprom_test(&pf->hw);
6392 if (err) {
6393 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6394 err);
6395 set_bit(__I40E_BAD_EEPROM, &pf->state);
6396 }
6397 }
6398
6399 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6400 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6401 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6402 }
6403 }
6404
6405 /**
6406 * i40e_enable_pf_switch_lb
6407 * @pf: pointer to the PF structure
6408 *
6409 * enable switch loop back or die - no point in a return value
6410 **/
6411 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6412 {
6413 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6414 struct i40e_vsi_context ctxt;
6415 int ret;
6416
6417 ctxt.seid = pf->main_vsi_seid;
6418 ctxt.pf_num = pf->hw.pf_id;
6419 ctxt.vf_num = 0;
6420 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6421 if (ret) {
6422 dev_info(&pf->pdev->dev,
6423 "couldn't get PF vsi config, err %s aq_err %s\n",
6424 i40e_stat_str(&pf->hw, ret),
6425 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6426 return;
6427 }
6428 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6429 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6430 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6431
6432 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6433 if (ret) {
6434 dev_info(&pf->pdev->dev,
6435 "update vsi switch failed, err %s aq_err %s\n",
6436 i40e_stat_str(&pf->hw, ret),
6437 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6438 }
6439 }
6440
6441 /**
6442 * i40e_disable_pf_switch_lb
6443 * @pf: pointer to the PF structure
6444 *
6445 * disable switch loop back or die - no point in a return value
6446 **/
6447 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6448 {
6449 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6450 struct i40e_vsi_context ctxt;
6451 int ret;
6452
6453 ctxt.seid = pf->main_vsi_seid;
6454 ctxt.pf_num = pf->hw.pf_id;
6455 ctxt.vf_num = 0;
6456 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6457 if (ret) {
6458 dev_info(&pf->pdev->dev,
6459 "couldn't get PF vsi config, err %s aq_err %s\n",
6460 i40e_stat_str(&pf->hw, ret),
6461 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6462 return;
6463 }
6464 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6465 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6466 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6467
6468 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6469 if (ret) {
6470 dev_info(&pf->pdev->dev,
6471 "update vsi switch failed, err %s aq_err %s\n",
6472 i40e_stat_str(&pf->hw, ret),
6473 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6474 }
6475 }
6476
6477 /**
6478 * i40e_config_bridge_mode - Configure the HW bridge mode
6479 * @veb: pointer to the bridge instance
6480 *
6481 * Configure the loop back mode for the LAN VSI that is downlink to the
6482 * specified HW bridge instance. It is expected this function is called
6483 * when a new HW bridge is instantiated.
6484 **/
6485 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6486 {
6487 struct i40e_pf *pf = veb->pf;
6488
6489 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6490 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6491 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6492 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6493 i40e_disable_pf_switch_lb(pf);
6494 else
6495 i40e_enable_pf_switch_lb(pf);
6496 }
6497
6498 /**
6499 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6500 * @veb: pointer to the VEB instance
6501 *
6502 * This is a recursive function that first builds the attached VSIs then
6503 * recurses to build the next layer of VEB. We track the connections
6504 * through our own index numbers because the SEIDs from the HW could
6505 * change across the reset.
6506 **/
6507 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6508 {
6509 struct i40e_vsi *ctl_vsi = NULL;
6510 struct i40e_pf *pf = veb->pf;
6511 int v, veb_idx;
6512 int ret;
6513
6514 /* build VSI that owns this VEB, temporarily attached to base VEB */
6515 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6516 if (pf->vsi[v] &&
6517 pf->vsi[v]->veb_idx == veb->idx &&
6518 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6519 ctl_vsi = pf->vsi[v];
6520 break;
6521 }
6522 }
6523 if (!ctl_vsi) {
6524 dev_info(&pf->pdev->dev,
6525 "missing owner VSI for veb_idx %d\n", veb->idx);
6526 ret = -ENOENT;
6527 goto end_reconstitute;
6528 }
6529 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6530 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6531 ret = i40e_add_vsi(ctl_vsi);
6532 if (ret) {
6533 dev_info(&pf->pdev->dev,
6534 "rebuild of veb_idx %d owner VSI failed: %d\n",
6535 veb->idx, ret);
6536 goto end_reconstitute;
6537 }
6538 i40e_vsi_reset_stats(ctl_vsi);
6539
6540 /* create the VEB in the switch and move the VSI onto the VEB */
6541 ret = i40e_add_veb(veb, ctl_vsi);
6542 if (ret)
6543 goto end_reconstitute;
6544
6545 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6546 veb->bridge_mode = BRIDGE_MODE_VEB;
6547 else
6548 veb->bridge_mode = BRIDGE_MODE_VEPA;
6549 i40e_config_bridge_mode(veb);
6550
6551 /* create the remaining VSIs attached to this VEB */
6552 for (v = 0; v < pf->num_alloc_vsi; v++) {
6553 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6554 continue;
6555
6556 if (pf->vsi[v]->veb_idx == veb->idx) {
6557 struct i40e_vsi *vsi = pf->vsi[v];
6558
6559 vsi->uplink_seid = veb->seid;
6560 ret = i40e_add_vsi(vsi);
6561 if (ret) {
6562 dev_info(&pf->pdev->dev,
6563 "rebuild of vsi_idx %d failed: %d\n",
6564 v, ret);
6565 goto end_reconstitute;
6566 }
6567 i40e_vsi_reset_stats(vsi);
6568 }
6569 }
6570
6571 /* create any VEBs attached to this VEB - RECURSION */
6572 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6573 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6574 pf->veb[veb_idx]->uplink_seid = veb->seid;
6575 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6576 if (ret)
6577 break;
6578 }
6579 }
6580
6581 end_reconstitute:
6582 return ret;
6583 }
6584
6585 /**
6586 * i40e_get_capabilities - get info about the HW
6587 * @pf: the PF struct
6588 **/
6589 static int i40e_get_capabilities(struct i40e_pf *pf)
6590 {
6591 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6592 u16 data_size;
6593 int buf_len;
6594 int err;
6595
6596 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6597 do {
6598 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6599 if (!cap_buf)
6600 return -ENOMEM;
6601
6602 /* this loads the data into the hw struct for us */
6603 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6604 &data_size,
6605 i40e_aqc_opc_list_func_capabilities,
6606 NULL);
6607 /* data loaded, buffer no longer needed */
6608 kfree(cap_buf);
6609
6610 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6611 /* retry with a larger buffer */
6612 buf_len = data_size;
6613 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6614 dev_info(&pf->pdev->dev,
6615 "capability discovery failed, err %s aq_err %s\n",
6616 i40e_stat_str(&pf->hw, err),
6617 i40e_aq_str(&pf->hw,
6618 pf->hw.aq.asq_last_status));
6619 return -ENODEV;
6620 }
6621 } while (err);
6622
6623 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6624 dev_info(&pf->pdev->dev,
6625 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6626 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6627 pf->hw.func_caps.num_msix_vectors,
6628 pf->hw.func_caps.num_msix_vectors_vf,
6629 pf->hw.func_caps.fd_filters_guaranteed,
6630 pf->hw.func_caps.fd_filters_best_effort,
6631 pf->hw.func_caps.num_tx_qp,
6632 pf->hw.func_caps.num_vsis);
6633
6634 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6635 + pf->hw.func_caps.num_vfs)
6636 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6637 dev_info(&pf->pdev->dev,
6638 "got num_vsis %d, setting num_vsis to %d\n",
6639 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6640 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6641 }
6642
6643 return 0;
6644 }
6645
6646 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6647
6648 /**
6649 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6650 * @pf: board private structure
6651 **/
6652 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6653 {
6654 struct i40e_vsi *vsi;
6655 int i;
6656
6657 /* quick workaround for an NVM issue that leaves a critical register
6658 * uninitialized
6659 */
6660 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6661 static const u32 hkey[] = {
6662 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6663 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6664 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6665 0x95b3a76d};
6666
6667 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6668 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6669 }
6670
6671 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6672 return;
6673
6674 /* find existing VSI and see if it needs configuring */
6675 vsi = NULL;
6676 for (i = 0; i < pf->num_alloc_vsi; i++) {
6677 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6678 vsi = pf->vsi[i];
6679 break;
6680 }
6681 }
6682
6683 /* create a new VSI if none exists */
6684 if (!vsi) {
6685 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6686 pf->vsi[pf->lan_vsi]->seid, 0);
6687 if (!vsi) {
6688 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6689 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6690 return;
6691 }
6692 }
6693
6694 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6695 }
6696
6697 /**
6698 * i40e_fdir_teardown - release the Flow Director resources
6699 * @pf: board private structure
6700 **/
6701 static void i40e_fdir_teardown(struct i40e_pf *pf)
6702 {
6703 int i;
6704
6705 i40e_fdir_filter_exit(pf);
6706 for (i = 0; i < pf->num_alloc_vsi; i++) {
6707 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6708 i40e_vsi_release(pf->vsi[i]);
6709 break;
6710 }
6711 }
6712 }
6713
6714 /**
6715 * i40e_prep_for_reset - prep for the core to reset
6716 * @pf: board private structure
6717 *
6718 * Close up the VFs and other things in prep for PF Reset.
6719 **/
6720 static void i40e_prep_for_reset(struct i40e_pf *pf)
6721 {
6722 struct i40e_hw *hw = &pf->hw;
6723 i40e_status ret = 0;
6724 u32 v;
6725
6726 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6727 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6728 return;
6729 if (i40e_check_asq_alive(&pf->hw))
6730 i40e_vc_notify_reset(pf);
6731
6732 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6733
6734 /* quiesce the VSIs and their queues that are not already DOWN */
6735 i40e_pf_quiesce_all_vsi(pf);
6736
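/* drop the stale switch element IDs; they get reassigned as the switch
 * is rebuilt after the reset
 */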
6737 for (v = 0; v < pf->num_alloc_vsi; v++) {
6738 if (pf->vsi[v])
6739 pf->vsi[v]->seid = 0;
6740 }
6741
6742 i40e_shutdown_adminq(&pf->hw);
6743
6744 /* call shutdown HMC */
6745 if (hw->hmc.hmc_obj) {
6746 ret = i40e_shutdown_lan_hmc(hw);
6747 if (ret)
6748 dev_warn(&pf->pdev->dev,
6749 "shutdown_lan_hmc failed: %d\n", ret);
6750 }
6751 }
6752
6753 /**
6754 * i40e_send_version - update firmware with driver version
6755 * @pf: PF struct
6756 */
6757 static void i40e_send_version(struct i40e_pf *pf)
6758 {
6759 struct i40e_driver_version dv;
6760
6761 dv.major_version = DRV_VERSION_MAJOR;
6762 dv.minor_version = DRV_VERSION_MINOR;
6763 dv.build_version = DRV_VERSION_BUILD;
6764 dv.subbuild_version = 0;
6765 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6766 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6767 }
6768
6769 /**
6770 * i40e_reset_and_rebuild - reset and rebuild using a saved config
6771 * @pf: board private structure
6772 * @reinit: if the Main VSI needs to be re-initialized.
6773 **/
6774 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6775 {
6776 struct i40e_hw *hw = &pf->hw;
6777 u8 set_fc_aq_fail = 0;
6778 i40e_status ret;
6779 u32 val;
6780 u32 v;
6781
6782 /* Now we wait for GRST to settle out.
6783 * We don't have to delete the VEBs or VSIs from the hw switch
6784 * because the reset will make them disappear.
6785 */
6786 ret = i40e_pf_reset(hw);
6787 if (ret) {
6788 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6789 set_bit(__I40E_RESET_FAILED, &pf->state);
6790 goto clear_recovery;
6791 }
6792 pf->pfr_count++;
6793
6794 if (test_bit(__I40E_DOWN, &pf->state))
6795 goto clear_recovery;
6796 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6797
6798 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6799 ret = i40e_init_adminq(&pf->hw);
6800 if (ret) {
6801 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6802 i40e_stat_str(&pf->hw, ret),
6803 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6804 goto clear_recovery;
6805 }
6806
6807 /* re-verify the eeprom if we just had an EMP reset */
6808 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6809 i40e_verify_eeprom(pf);
6810
6811 i40e_clear_pxe_mode(hw);
6812 ret = i40e_get_capabilities(pf);
6813 if (ret)
6814 goto end_core_reset;
6815
6816 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6817 hw->func_caps.num_rx_qp,
6818 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6819 if (ret) {
6820 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6821 goto end_core_reset;
6822 }
6823 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6824 if (ret) {
6825 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6826 goto end_core_reset;
6827 }
6828
6829 #ifdef CONFIG_I40E_DCB
6830 ret = i40e_init_pf_dcb(pf);
6831 if (ret) {
6832 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6833 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6834 /* Continue without DCB enabled */
6835 }
6836 #endif /* CONFIG_I40E_DCB */
6837 #ifdef I40E_FCOE
6838 i40e_init_pf_fcoe(pf);
6839
6840 #endif
6841 /* do basic switch setup */
6842 ret = i40e_setup_pf_switch(pf, reinit);
6843 if (ret)
6844 goto end_core_reset;
6845
6846 /* The driver only wants link up/down and module qualification
6847 * reports from firmware. Note the negative logic.
6848 */
6849 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6850 ~(I40E_AQ_EVENT_LINK_UPDOWN |
6851 I40E_AQ_EVENT_MEDIA_NA |
6852 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
6853 if (ret)
6854 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6855 i40e_stat_str(&pf->hw, ret),
6856 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6857
6858 /* make sure our flow control settings are restored */
6859 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6860 if (ret)
6861 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6862 i40e_stat_str(&pf->hw, ret),
6863 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6864
6865 /* Rebuild the VSIs and VEBs that existed before reset.
6866 * They are still in our local switch element arrays, so only
6867 * need to rebuild the switch model in the HW.
6868 *
6869 * If there were VEBs but the reconstitution failed, we'll try
6870 * to recover minimal use by getting the basic PF VSI working.
6871 */
6872 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6873 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6874 /* find the one VEB connected to the MAC, and find orphans */
6875 for (v = 0; v < I40E_MAX_VEB; v++) {
6876 if (!pf->veb[v])
6877 continue;
6878
6879 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6880 pf->veb[v]->uplink_seid == 0) {
6881 ret = i40e_reconstitute_veb(pf->veb[v]);
6882
6883 if (!ret)
6884 continue;
6885
6886 /* If Main VEB failed, we're in deep doodoo,
6887 * so give up rebuilding the switch and set up
6888 * for minimal rebuild of PF VSI.
6889 * If orphan failed, we'll report the error
6890 * but try to keep going.
6891 */
6892 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6893 dev_info(&pf->pdev->dev,
6894 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6895 ret);
6896 pf->vsi[pf->lan_vsi]->uplink_seid
6897 = pf->mac_seid;
6898 break;
6899 } else if (pf->veb[v]->uplink_seid == 0) {
6900 dev_info(&pf->pdev->dev,
6901 "rebuild of orphan VEB failed: %d\n",
6902 ret);
6903 }
6904 }
6905 }
6906 }
6907
6908 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6909 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6910 /* no VEB, so rebuild only the Main VSI */
6911 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6912 if (ret) {
6913 dev_info(&pf->pdev->dev,
6914 "rebuild of Main VSI failed: %d\n", ret);
6915 goto end_core_reset;
6916 }
6917 }
6918
6919 /* Reconfigure hardware for allowing smaller MSS in the case
6920 * of TSO, so that we avoid the MDD being fired and causing
6921 * a reset in the case of small MSS+TSO.
6922 */
6923 #define I40E_REG_MSS 0x000E64DC
6924 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
6925 #define I40E_64BYTE_MSS 0x400000
6926 val = rd32(hw, I40E_REG_MSS);
6927 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6928 val &= ~I40E_REG_MSS_MIN_MASK;
6929 val |= I40E_64BYTE_MSS;
6930 wr32(hw, I40E_REG_MSS, val);
6931 }
6932
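/* on setups flagged at probe time, kick autonegotiation after the reset
 * so that link comes back up
 */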
6933 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
6934 msleep(75);
6935 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6936 if (ret)
6937 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6938 i40e_stat_str(&pf->hw, ret),
6939 i40e_aq_str(&pf->hw,
6940 pf->hw.aq.asq_last_status));
6941 }
6942 /* reinit the misc interrupt */
6943 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6944 ret = i40e_setup_misc_vector(pf);
6945
6946 /* Add a filter to drop all Flow Control frames from any VSI before they
6947 * are transmitted. By doing so we stop a malicious VF from sending out
6948 * PAUSE or PFC frames and potentially controlling traffic for other
6949 * PF/VF VSIs.
6950 * The FW can still send Flow control frames if enabled.
6951 */
6952 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6953 pf->main_vsi_seid);
6954
6955 /* restart the VSIs that were rebuilt and running before the reset */
6956 i40e_pf_unquiesce_all_vsi(pf);
6957
6958 if (pf->num_alloc_vfs) {
6959 for (v = 0; v < pf->num_alloc_vfs; v++)
6960 i40e_reset_vf(&pf->vf[v], true);
6961 }
6962
6963 /* tell the firmware that we're starting */
6964 i40e_send_version(pf);
6965
6966 end_core_reset:
6967 clear_bit(__I40E_RESET_FAILED, &pf->state);
6968 clear_recovery:
6969 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6970 }
6971
6972 /**
6973 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6974 * @pf: board private structure
6975 *
6976 * Close up the VFs and other things in prep for a Core Reset,
6977 * then get ready to rebuild the world.
6978 **/
6979 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6980 {
6981 i40e_prep_for_reset(pf);
6982 i40e_reset_and_rebuild(pf, false);
6983 }
6984
6985 /**
6986 * i40e_handle_mdd_event
6987 * @pf: pointer to the PF structure
6988 *
6989 * Called from the MDD irq handler to identify possibly malicious VFs
6990 **/
6991 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6992 {
6993 struct i40e_hw *hw = &pf->hw;
6994 bool mdd_detected = false;
6995 bool pf_mdd_detected = false;
6996 struct i40e_vf *vf;
6997 u32 reg;
6998 int i;
6999
7000 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
7001 return;
7002
7003 /* find what triggered the MDD event */
7004 reg = rd32(hw, I40E_GL_MDET_TX);
7005 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
7006 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
7007 I40E_GL_MDET_TX_PF_NUM_SHIFT;
7008 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
7009 I40E_GL_MDET_TX_VF_NUM_SHIFT;
7010 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
7011 I40E_GL_MDET_TX_EVENT_SHIFT;
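/* MDET reports an absolute queue number; subtract base_queue to get the
 * PF-relative queue index
 */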
7012 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
7013 I40E_GL_MDET_TX_QUEUE_SHIFT) -
7014 pf->hw.func_caps.base_queue;
7015 if (netif_msg_tx_err(pf))
7016 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
7017 event, queue, pf_num, vf_num);
7018 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
7019 mdd_detected = true;
7020 }
7021 reg = rd32(hw, I40E_GL_MDET_RX);
7022 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
7023 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
7024 I40E_GL_MDET_RX_FUNCTION_SHIFT;
7025 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
7026 I40E_GL_MDET_RX_EVENT_SHIFT;
7027 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
7028 I40E_GL_MDET_RX_QUEUE_SHIFT) -
7029 pf->hw.func_caps.base_queue;
7030 if (netif_msg_rx_err(pf))
7031 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
7032 event, queue, func);
7033 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
7034 mdd_detected = true;
7035 }
7036
7037 if (mdd_detected) {
7038 reg = rd32(hw, I40E_PF_MDET_TX);
7039 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7040 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7041 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7042 pf_mdd_detected = true;
7043 }
7044 reg = rd32(hw, I40E_PF_MDET_RX);
7045 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7046 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7047 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7048 pf_mdd_detected = true;
7049 }
7050 /* Queue belongs to the PF, initiate a reset */
7051 if (pf_mdd_detected) {
7052 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7053 i40e_service_event_schedule(pf);
7054 }
7055 }
7056
7057 /* see if one of the VFs needs its hand slapped */
7058 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7059 vf = &(pf->vf[i]);
7060 reg = rd32(hw, I40E_VP_MDET_TX(i));
7061 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7062 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7063 vf->num_mdd_events++;
7064 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7065 i);
7066 }
7067
7068 reg = rd32(hw, I40E_VP_MDET_RX(i));
7069 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7070 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7071 vf->num_mdd_events++;
7072 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7073 i);
7074 }
7075
7076 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7077 dev_info(&pf->pdev->dev,
7078 "Too many MDD events on VF %d, disabled\n", i);
7079 dev_info(&pf->pdev->dev,
7080 "Use PF Control I/F to re-enable the VF\n");
7081 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7082 }
7083 }
7084
7085 /* re-enable mdd interrupt cause */
7086 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7087 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7088 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7089 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7090 i40e_flush(hw);
7091 }
7092
7093 /**
7094 * i40e_sync_udp_filters_subtask - Sync the pending UDP tunnel ports with HW
7095 * @pf: board private structure
7096 **/
7097 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
7098 {
7099 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
7100 struct i40e_hw *hw = &pf->hw;
7101 i40e_status ret;
7102 __be16 port;
7103 int i;
7104
7105 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
7106 return;
7107
7108 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
7109
7110 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7111 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
7112 pf->pending_udp_bitmap &= ~BIT_ULL(i);
7113 port = pf->udp_ports[i].index;
7114 if (port)
7115 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
7116 pf->udp_ports[i].type,
7117 NULL, NULL);
7118 else
7119 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
7120
7121 if (ret) {
7122 dev_dbg(&pf->pdev->dev,
7123 "%s %s port %d, index %d failed, err %s aq_err %s\n",
7124 pf->udp_ports[i].type ? "vxlan" : "geneve",
7125 port ? "add" : "delete",
7126 ntohs(port), i,
7127 i40e_stat_str(&pf->hw, ret),
7128 i40e_aq_str(&pf->hw,
7129 pf->hw.aq.asq_last_status));
7130 pf->udp_ports[i].index = 0;
7131 }
7132 }
7133 }
7134 #endif
7135 }
7136
7137 /**
7138 * i40e_service_task - Run the driver's async subtasks
7139 * @work: pointer to work_struct containing our data
7140 **/
7141 static void i40e_service_task(struct work_struct *work)
7142 {
7143 struct i40e_pf *pf = container_of(work,
7144 struct i40e_pf,
7145 service_task);
7146 unsigned long start_time = jiffies;
7147
7148 /* don't bother with service tasks if a reset is in progress */
7149 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7150 i40e_service_event_complete(pf);
7151 return;
7152 }
7153
7154 i40e_detect_recover_hung(pf);
7155 i40e_sync_filters_subtask(pf);
7156 i40e_reset_subtask(pf);
7157 i40e_handle_mdd_event(pf);
7158 i40e_vc_process_vflr_event(pf);
7159 i40e_watchdog_subtask(pf);
7160 i40e_fdir_reinit_subtask(pf);
7161 i40e_client_subtask(pf);
7162 i40e_sync_filters_subtask(pf);
7163 i40e_sync_udp_filters_subtask(pf);
7164 i40e_clean_adminq_subtask(pf);
7165
7166 i40e_service_event_complete(pf);
7167
7168 /* If the tasks have taken longer than one timer cycle or there
7169 * is more work to be done, reschedule the service task now
7170 * rather than wait for the timer to tick again.
7171 */
7172 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
7173 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
7174 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
7175 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
7176 i40e_service_event_schedule(pf);
7177 }
7178
7179 /**
7180 * i40e_service_timer - timer callback
7181 * @data: pointer to PF struct
7182 **/
7183 static void i40e_service_timer(unsigned long data)
7184 {
7185 struct i40e_pf *pf = (struct i40e_pf *)data;
7186
7187 mod_timer(&pf->service_timer,
7188 round_jiffies(jiffies + pf->service_timer_period));
7189 i40e_service_event_schedule(pf);
7190 }
7191
7192 /**
7193 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
7194 * @vsi: the VSI being configured
7195 **/
7196 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7197 {
7198 struct i40e_pf *pf = vsi->back;
7199
7200 switch (vsi->type) {
7201 case I40E_VSI_MAIN:
7202 vsi->alloc_queue_pairs = pf->num_lan_qps;
7203 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7204 I40E_REQ_DESCRIPTOR_MULTIPLE);
7205 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7206 vsi->num_q_vectors = pf->num_lan_msix;
7207 else
7208 vsi->num_q_vectors = 1;
7209
7210 break;
7211
7212 case I40E_VSI_FDIR:
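/* the sideband Flow Director VSI only needs one queue pair and one
 * interrupt vector for filter programming
 */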
7213 vsi->alloc_queue_pairs = 1;
7214 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7215 I40E_REQ_DESCRIPTOR_MULTIPLE);
7216 vsi->num_q_vectors = 1;
7217 break;
7218
7219 case I40E_VSI_VMDQ2:
7220 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
7221 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7222 I40E_REQ_DESCRIPTOR_MULTIPLE);
7223 vsi->num_q_vectors = pf->num_vmdq_msix;
7224 break;
7225
7226 case I40E_VSI_SRIOV:
7227 vsi->alloc_queue_pairs = pf->num_vf_qps;
7228 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7229 I40E_REQ_DESCRIPTOR_MULTIPLE);
7230 break;
7231
7232 #ifdef I40E_FCOE
7233 case I40E_VSI_FCOE:
7234 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
7235 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
7236 I40E_REQ_DESCRIPTOR_MULTIPLE);
7237 vsi->num_q_vectors = pf->num_fcoe_msix;
7238 break;
7239
7240 #endif /* I40E_FCOE */
7241 default:
7242 WARN_ON(1);
7243 return -ENODATA;
7244 }
7245
7246 return 0;
7247 }
7248
7249 /**
7250 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7251 * @vsi: pointer to the VSI being configured
7252 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7253 *
7254 * On error: returns error code (negative)
7255 * On success: returns 0
7256 **/
7257 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7258 {
7259 int size;
7260 int ret = 0;
7261
7262 /* allocate memory for both Tx and Rx ring pointers */
7263 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7264 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7265 if (!vsi->tx_rings)
7266 return -ENOMEM;
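/* Rx ring pointers occupy the second half of the same allocation */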
7267 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7268
7269 if (alloc_qvectors) {
7270 /* allocate memory for q_vector pointers */
7271 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7272 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7273 if (!vsi->q_vectors) {
7274 ret = -ENOMEM;
7275 goto err_vectors;
7276 }
7277 }
7278 return ret;
7279
7280 err_vectors:
7281 kfree(vsi->tx_rings);
7282 return ret;
7283 }
7284
7285 /**
7286 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7287 * @pf: board private structure
7288 * @type: type of VSI
7289 *
7290 * On error: returns error code (negative)
7291 * On success: returns vsi index in PF (positive)
7292 **/
7293 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7294 {
7295 int ret = -ENODEV;
7296 struct i40e_vsi *vsi;
7297 int vsi_idx;
7298 int i;
7299
7300 /* Need to protect the allocation of the VSIs at the PF level */
7301 mutex_lock(&pf->switch_mutex);
7302
7303 /* VSI list may be fragmented if VSI creation/destruction has
7304 * been happening. We can afford to do a quick scan to look
7305 * for any free VSIs in the list.
7306 *
7307 * find next empty vsi slot, looping back around if necessary
7308 */
7309 i = pf->next_vsi;
7310 while (i < pf->num_alloc_vsi && pf->vsi[i])
7311 i++;
7312 if (i >= pf->num_alloc_vsi) {
7313 i = 0;
7314 while (i < pf->next_vsi && pf->vsi[i])
7315 i++;
7316 }
7317
7318 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7319 vsi_idx = i; /* Found one! */
7320 } else {
7321 ret = -ENODEV;
7322 goto unlock_pf; /* out of VSI slots! */
7323 }
7324 pf->next_vsi = ++i;
7325
7326 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7327 if (!vsi) {
7328 ret = -ENOMEM;
7329 goto unlock_pf;
7330 }
7331 vsi->type = type;
7332 vsi->back = pf;
7333 set_bit(__I40E_DOWN, &vsi->state);
7334 vsi->flags = 0;
7335 vsi->idx = vsi_idx;
7336 vsi->int_rate_limit = 0;
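/* the main VSI uses the PF-wide RSS table size; all other VSI types get
 * a 64-entry lookup table
 */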
7337 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7338 pf->rss_table_size : 64;
7339 vsi->netdev_registered = false;
7340 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7341 INIT_LIST_HEAD(&vsi->mac_filter_list);
7342 vsi->irqs_ready = false;
7343
7344 ret = i40e_set_num_rings_in_vsi(vsi);
7345 if (ret)
7346 goto err_rings;
7347
7348 ret = i40e_vsi_alloc_arrays(vsi, true);
7349 if (ret)
7350 goto err_rings;
7351
7352 /* Setup default MSIX irq handler for VSI */
7353 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7354
7355 /* Initialize VSI lock */
7356 spin_lock_init(&vsi->mac_filter_list_lock);
7357 pf->vsi[vsi_idx] = vsi;
7358 ret = vsi_idx;
7359 goto unlock_pf;
7360
7361 err_rings:
7362 pf->next_vsi = i - 1;
7363 kfree(vsi);
7364 unlock_pf:
7365 mutex_unlock(&pf->switch_mutex);
7366 return ret;
7367 }
7368
7369 /**
7370 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7371  * @vsi: pointer to the VSI whose arrays are being freed
7372  * @free_qvectors: a bool to specify if q_vectors need to be freed.
7376 **/
7377 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7378 {
7379 /* free the ring and vector containers */
7380 if (free_qvectors) {
7381 kfree(vsi->q_vectors);
7382 vsi->q_vectors = NULL;
7383 }
7384 kfree(vsi->tx_rings);
7385 vsi->tx_rings = NULL;
7386 vsi->rx_rings = NULL;
7387 }
7388
7389 /**
7390 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
7391 * and lookup table
7392 * @vsi: Pointer to VSI structure
7393 */
7394 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
7395 {
7396 if (!vsi)
7397 return;
7398
7399 kfree(vsi->rss_hkey_user);
7400 vsi->rss_hkey_user = NULL;
7401
7402 kfree(vsi->rss_lut_user);
7403 vsi->rss_lut_user = NULL;
7404 }
7405
7406 /**
7407 * i40e_vsi_clear - Deallocate the VSI provided
7408 * @vsi: the VSI being un-configured
7409 **/
7410 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7411 {
7412 struct i40e_pf *pf;
7413
7414 if (!vsi)
7415 return 0;
7416
7417 if (!vsi->back)
7418 goto free_vsi;
7419 pf = vsi->back;
7420
7421 mutex_lock(&pf->switch_mutex);
7422 if (!pf->vsi[vsi->idx]) {
7423 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7424 vsi->idx, vsi->idx, vsi, vsi->type);
7425 goto unlock_vsi;
7426 }
7427
7428 if (pf->vsi[vsi->idx] != vsi) {
7429 dev_err(&pf->pdev->dev,
7430 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7431 pf->vsi[vsi->idx]->idx,
7432 pf->vsi[vsi->idx],
7433 pf->vsi[vsi->idx]->type,
7434 vsi->idx, vsi, vsi->type);
7435 goto unlock_vsi;
7436 }
7437
7438 /* updates the PF for this cleared vsi */
7439 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7440 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7441
7442 i40e_vsi_free_arrays(vsi, true);
7443 i40e_clear_rss_config_user(vsi);
7444
7445 pf->vsi[vsi->idx] = NULL;
7446 if (vsi->idx < pf->next_vsi)
7447 pf->next_vsi = vsi->idx;
7448
7449 unlock_vsi:
7450 mutex_unlock(&pf->switch_mutex);
7451 free_vsi:
7452 kfree(vsi);
7453
7454 return 0;
7455 }
7456
7457 /**
7458 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7459 * @vsi: the VSI being cleaned
7460 **/
7461 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7462 {
7463 int i;
7464
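/* Tx and Rx ring structs are allocated as a single block in
 * i40e_alloc_rings(), so freeing the Tx ring below also releases
 * its paired Rx ring; only the pointers need to be NULLed here.
 */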
7465 if (vsi->tx_rings && vsi->tx_rings[0]) {
7466 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7467 kfree_rcu(vsi->tx_rings[i], rcu);
7468 vsi->tx_rings[i] = NULL;
7469 vsi->rx_rings[i] = NULL;
7470 }
7471 }
7472 }
7473
7474 /**
7475 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7476 * @vsi: the VSI being configured
7477 **/
7478 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7479 {
7480 struct i40e_ring *tx_ring, *rx_ring;
7481 struct i40e_pf *pf = vsi->back;
7482 int i;
7483
7484 /* Set basic values in the rings to be used later during open() */
7485 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7486 /* allocate space for both Tx and Rx in one shot */
7487 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7488 if (!tx_ring)
7489 goto err_out;
7490
7491 tx_ring->queue_index = i;
7492 tx_ring->reg_idx = vsi->base_queue + i;
7493 tx_ring->ring_active = false;
7494 tx_ring->vsi = vsi;
7495 tx_ring->netdev = vsi->netdev;
7496 tx_ring->dev = &pf->pdev->dev;
7497 tx_ring->count = vsi->num_desc;
7498 tx_ring->size = 0;
7499 tx_ring->dcb_tc = 0;
7500 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7501 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7502 tx_ring->tx_itr_setting = pf->tx_itr_default;
7503 vsi->tx_rings[i] = tx_ring;
7504
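/* the Rx ring is the second half of the two-ring block
 * allocated above, immediately after the Tx ring
 */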
7505 rx_ring = &tx_ring[1];
7506 rx_ring->queue_index = i;
7507 rx_ring->reg_idx = vsi->base_queue + i;
7508 rx_ring->ring_active = false;
7509 rx_ring->vsi = vsi;
7510 rx_ring->netdev = vsi->netdev;
7511 rx_ring->dev = &pf->pdev->dev;
7512 rx_ring->count = vsi->num_desc;
7513 rx_ring->size = 0;
7514 rx_ring->dcb_tc = 0;
7515 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7516 set_ring_16byte_desc_enabled(rx_ring);
7517 else
7518 clear_ring_16byte_desc_enabled(rx_ring);
7519 rx_ring->rx_itr_setting = pf->rx_itr_default;
7520 vsi->rx_rings[i] = rx_ring;
7521 }
7522
7523 return 0;
7524
7525 err_out:
7526 i40e_vsi_clear_rings(vsi);
7527 return -ENOMEM;
7528 }
7529
7530 /**
7531 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7532 * @pf: board private structure
7533 * @vectors: the number of MSI-X vectors to request
7534 *
7535 * Returns the number of vectors reserved, or error
7536 **/
7537 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7538 {
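/* pci_enable_msix_range() returns the number of vectors actually
 * granted (between I40E_MIN_MSIX and the requested count) or a
 * negative errno if even the minimum could not be reserved.
 */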
7539 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7540 I40E_MIN_MSIX, vectors);
7541 if (vectors < 0) {
7542 dev_info(&pf->pdev->dev,
7543 "MSI-X vector reservation failed: %d\n", vectors);
7544 vectors = 0;
7545 }
7546
7547 return vectors;
7548 }
7549
7550 /**
7551 * i40e_init_msix - Setup the MSIX capability
7552 * @pf: board private structure
7553 *
7554 * Work with the OS to set up the MSIX vectors needed.
7555 *
7556 * Returns the number of vectors reserved or negative on failure
7557 **/
7558 static int i40e_init_msix(struct i40e_pf *pf)
7559 {
7560 struct i40e_hw *hw = &pf->hw;
7561 int vectors_left;
7562 int v_budget, i;
7563 int v_actual;
7564 int iwarp_requested = 0;
7565
7566 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7567 return -ENODEV;
7568
7569 /* The number of vectors we'll request will be comprised of:
7570 * - Add 1 for "other" cause for Admin Queue events, etc.
7571 * - The number of LAN queue pairs
7572 * - Queues being used for RSS.
7573  * We don't need as many as max_rss_size vectors;
7574  * use rss_size instead in the calculation, since that
7575  * is governed by the number of CPUs in the system.
7576 * - assumes symmetric Tx/Rx pairing
7577 * - The number of VMDq pairs
7578 * - The CPU count within the NUMA node if iWARP is enabled
7579 #ifdef I40E_FCOE
7580 * - The number of FCOE qps.
7581 #endif
7582 * Once we count this up, try the request.
7583 *
7584 * If we can't get what we want, we'll simplify to nearly nothing
7585 * and try again. If that still fails, we punt.
7586 */
7587 vectors_left = hw->func_caps.num_msix_vectors;
7588 v_budget = 0;
7589
7590 /* reserve one vector for miscellaneous handler */
7591 if (vectors_left) {
7592 v_budget++;
7593 vectors_left--;
7594 }
7595
7596 /* reserve vectors for the main PF traffic queues */
7597 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7598 vectors_left -= pf->num_lan_msix;
7599 v_budget += pf->num_lan_msix;
7600
7601 /* reserve one vector for sideband flow director */
7602 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7603 if (vectors_left) {
7604 v_budget++;
7605 vectors_left--;
7606 } else {
7607 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7608 }
7609 }
7610
7611 #ifdef I40E_FCOE
7612 /* can we reserve enough for FCoE? */
7613 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7614 if (!vectors_left)
7615 pf->num_fcoe_msix = 0;
7616 else if (vectors_left >= pf->num_fcoe_qps)
7617 pf->num_fcoe_msix = pf->num_fcoe_qps;
7618 else
7619 pf->num_fcoe_msix = 1;
7620 v_budget += pf->num_fcoe_msix;
7621 vectors_left -= pf->num_fcoe_msix;
7622 }
7623
7624 #endif
7625 /* can we reserve enough for iWARP? */
7626 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7627 if (!vectors_left)
7628 pf->num_iwarp_msix = 0;
7629 else if (vectors_left < pf->num_iwarp_msix)
7630 pf->num_iwarp_msix = 1;
7631 v_budget += pf->num_iwarp_msix;
7632 vectors_left -= pf->num_iwarp_msix;
7633 }
7634
7635 /* any vectors left over go for VMDq support */
7636 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7637 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7638 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7639
7640 /* if we're short on vectors for what's desired, we limit
7641 * the queues per vmdq. If this is still more than are
7642 * available, the user will need to change the number of
7643 * queues/vectors used by the PF later with the ethtool
7644 * channels command
7645 */
7646 if (vmdq_vecs < vmdq_vecs_wanted)
7647 pf->num_vmdq_qps = 1;
7648 pf->num_vmdq_msix = pf->num_vmdq_qps;
7649
7650 v_budget += vmdq_vecs;
7651 vectors_left -= vmdq_vecs;
7652 }
7653
7654 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7655 GFP_KERNEL);
7656 if (!pf->msix_entries)
7657 return -ENOMEM;
7658
7659 for (i = 0; i < v_budget; i++)
7660 pf->msix_entries[i].entry = i;
7661 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7662
7663 if (v_actual != v_budget) {
7664 /* If we have limited resources, we will start with no vectors
7665 * for the special features and then allocate vectors to some
7666 * of these features based on the policy and at the end disable
7667 * the features that did not get any vectors.
7668 */
7669 iwarp_requested = pf->num_iwarp_msix;
7670 pf->num_iwarp_msix = 0;
7671 #ifdef I40E_FCOE
7672 pf->num_fcoe_qps = 0;
7673 pf->num_fcoe_msix = 0;
7674 #endif
7675 pf->num_vmdq_msix = 0;
7676 }
7677
7678 if (v_actual < I40E_MIN_MSIX) {
7679 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7680 kfree(pf->msix_entries);
7681 pf->msix_entries = NULL;
7682 return -ENODEV;
7683
7684 } else if (v_actual == I40E_MIN_MSIX) {
7685 /* Adjust for minimal MSIX use */
7686 pf->num_vmdq_vsis = 0;
7687 pf->num_vmdq_qps = 0;
7688 pf->num_lan_qps = 1;
7689 pf->num_lan_msix = 1;
7690
7691 } else if (v_actual != v_budget) {
7692 int vec;
7693
7694 /* reserve the misc vector */
7695 vec = v_actual - 1;
7696
7697 /* Scale vector usage down */
7698 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
7699 pf->num_vmdq_vsis = 1;
7700 pf->num_vmdq_qps = 1;
7701 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7702
7703 /* partition out the remaining vectors */
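/* Example of the scaling below, assuming iWARP is enabled and
 * vec == 3: LAN gets 1 vector and iWARP gets 1 vector, on top of
 * the misc vector already set aside above.
 */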
7704 switch (vec) {
7705 case 2:
7706 pf->num_lan_msix = 1;
7707 break;
7708 case 3:
7709 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7710 pf->num_lan_msix = 1;
7711 pf->num_iwarp_msix = 1;
7712 } else {
7713 pf->num_lan_msix = 2;
7714 }
7715 #ifdef I40E_FCOE
7716 /* give one vector to FCoE */
7717 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7718 pf->num_lan_msix = 1;
7719 pf->num_fcoe_msix = 1;
7720 }
7721 #endif
7722 break;
7723 default:
7724 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
7725 pf->num_iwarp_msix = min_t(int, (vec / 3),
7726 iwarp_requested);
7727 pf->num_vmdq_vsis = min_t(int, (vec / 3),
7728 I40E_DEFAULT_NUM_VMDQ_VSI);
7729 } else {
7730 pf->num_vmdq_vsis = min_t(int, (vec / 2),
7731 I40E_DEFAULT_NUM_VMDQ_VSI);
7732 }
7733 pf->num_lan_msix = min_t(int,
7734 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
7735 pf->num_lan_msix);
7736 #ifdef I40E_FCOE
7737 /* give one vector to FCoE */
7738 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7739 pf->num_fcoe_msix = 1;
7740 vec--;
7741 }
7742 #endif
7743 break;
7744 }
7745 }
7746
7747 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7748 (pf->num_vmdq_msix == 0)) {
7749 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7750 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7751 }
7752
7753 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
7754 (pf->num_iwarp_msix == 0)) {
7755 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
7756 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
7757 }
7758 #ifdef I40E_FCOE
7759
7760 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7761 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7762 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7763 }
7764 #endif
7765 return v_actual;
7766 }
7767
7768 /**
7769 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7770 * @vsi: the VSI being configured
7771 * @v_idx: index of the vector in the vsi struct
7772 *
7773 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7774 **/
7775 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7776 {
7777 struct i40e_q_vector *q_vector;
7778
7779 /* allocate q_vector */
7780 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7781 if (!q_vector)
7782 return -ENOMEM;
7783
7784 q_vector->vsi = vsi;
7785 q_vector->v_idx = v_idx;
7786 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7787 if (vsi->netdev)
7788 netif_napi_add(vsi->netdev, &q_vector->napi,
7789 i40e_napi_poll, NAPI_POLL_WEIGHT);
7790
7791 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7792 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7793
7794 /* tie q_vector and vsi together */
7795 vsi->q_vectors[v_idx] = q_vector;
7796
7797 return 0;
7798 }
7799
7800 /**
7801 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7802 * @vsi: the VSI being configured
7803 *
7804 * We allocate one q_vector per queue interrupt. If allocation fails we
7805 * return -ENOMEM.
7806 **/
7807 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7808 {
7809 struct i40e_pf *pf = vsi->back;
7810 int v_idx, num_q_vectors;
7811 int err;
7812
7813 /* if not MSIX, give the one vector only to the LAN VSI */
7814 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7815 num_q_vectors = vsi->num_q_vectors;
7816 else if (vsi == pf->vsi[pf->lan_vsi])
7817 num_q_vectors = 1;
7818 else
7819 return -EINVAL;
7820
7821 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7822 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7823 if (err)
7824 goto err_out;
7825 }
7826
7827 return 0;
7828
7829 err_out:
7830 while (v_idx--)
7831 i40e_free_q_vector(vsi, v_idx);
7832
7833 return err;
7834 }
7835
7836 /**
7837 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7838 * @pf: board private structure to initialize
7839 **/
7840 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7841 {
7842 int vectors = 0;
7843 ssize_t size;
7844
7845 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7846 vectors = i40e_init_msix(pf);
7847 if (vectors < 0) {
7848 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7849 I40E_FLAG_IWARP_ENABLED |
7850 #ifdef I40E_FCOE
7851 I40E_FLAG_FCOE_ENABLED |
7852 #endif
7853 I40E_FLAG_RSS_ENABLED |
7854 I40E_FLAG_DCB_CAPABLE |
7855 I40E_FLAG_SRIOV_ENABLED |
7856 I40E_FLAG_FD_SB_ENABLED |
7857 I40E_FLAG_FD_ATR_ENABLED |
7858 I40E_FLAG_VMDQ_ENABLED);
7859
7860 /* rework the queue expectations without MSIX */
7861 i40e_determine_queue_usage(pf);
7862 }
7863 }
7864
7865 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7866 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7867 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
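/* pci_enable_msi() returns 0 on success or a negative errno; either
 * way the driver continues with a single vector (MSI if it
 * succeeded, legacy INTx otherwise).
 */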
7868 vectors = pci_enable_msi(pf->pdev);
7869 if (vectors < 0) {
7870 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7871 vectors);
7872 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7873 }
7874 vectors = 1; /* one MSI or Legacy vector */
7875 }
7876
7877 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7878 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7879
7880 /* set up vector assignment tracking */
7881 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7882 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7883 if (!pf->irq_pile) {
7884 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7885 return -ENOMEM;
7886 }
7887 pf->irq_pile->num_entries = vectors;
7888 pf->irq_pile->search_hint = 0;
7889
7890 /* track first vector for misc interrupts, ignore return */
7891 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7892
7893 return 0;
7894 }
7895
7896 /**
7897 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7898 * @pf: board private structure
7899 *
7900 * This sets up the handler for MSIX 0, which is used to manage the
7901 * non-queue interrupts, e.g. AdminQ and errors. This is not used
7902 * when in MSI or Legacy interrupt mode.
7903 **/
7904 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7905 {
7906 struct i40e_hw *hw = &pf->hw;
7907 int err = 0;
7908
7909 /* Only request the irq if this is the first time through, and
7910 * not when we're rebuilding after a Reset
7911 */
7912 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7913 err = request_irq(pf->msix_entries[0].vector,
7914 i40e_intr, 0, pf->int_name, pf);
7915 if (err) {
7916 dev_info(&pf->pdev->dev,
7917 "request_irq for %s failed: %d\n",
7918 pf->int_name, err);
7919 return -EFAULT;
7920 }
7921 }
7922
7923 i40e_enable_misc_int_causes(pf);
7924
7925 /* associate no queues to the misc vector */
7926 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7927 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7928
7929 i40e_flush(hw);
7930
7931 i40e_irq_dynamic_enable_icr0(pf, true);
7932
7933 return err;
7934 }
7935
7936 /**
7937 * i40e_config_rss_aq - Prepare for RSS using AQ commands
7938  * @vsi: vsi structure
7939  * @seed: RSS hash seed
 * @lut: pointer to the lookup table
 * @lut_size: size of the lookup table in bytes
7940 **/
7941 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
7942 u8 *lut, u16 lut_size)
7943 {
7944 struct i40e_aqc_get_set_rss_key_data rss_key;
7945 struct i40e_pf *pf = vsi->back;
7946 struct i40e_hw *hw = &pf->hw;
7947 bool pf_lut = false;
7948 u8 *rss_lut;
7949 int ret, i;
7950
7951 memset(&rss_key, 0, sizeof(rss_key));
7952 memcpy(&rss_key, seed, sizeof(rss_key));
7953
7954 rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7955 if (!rss_lut)
7956 return -ENOMEM;
7957
7958 /* Populate the LUT with max no. of queues in round robin fashion */
7959 for (i = 0; i < vsi->rss_table_size; i++)
7960 rss_lut[i] = i % vsi->rss_size;
7961
7962 ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7963 if (ret) {
7964 dev_info(&pf->pdev->dev,
7965 "Cannot set RSS key, err %s aq_err %s\n",
7966 i40e_stat_str(&pf->hw, ret),
7967 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7968 goto config_rss_aq_out;
7969 }
7970
7971 if (vsi->type == I40E_VSI_MAIN)
7972 pf_lut = true;
7973
7974 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7975 vsi->rss_table_size);
7976 if (ret)
7977 dev_info(&pf->pdev->dev,
7978 "Cannot set RSS lut, err %s aq_err %s\n",
7979 i40e_stat_str(&pf->hw, ret),
7980 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7981
7982 config_rss_aq_out:
7983 kfree(rss_lut);
7984 return ret;
7985 }
7986
7987 /**
7988 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7989 * @vsi: VSI structure
7990 **/
7991 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7992 {
7993 u8 seed[I40E_HKEY_ARRAY_SIZE];
7994 struct i40e_pf *pf = vsi->back;
7995 u8 *lut;
7996 int ret;
7997
7998 if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
7999 return 0;
8000
8001 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8002 if (!lut)
8003 return -ENOMEM;
8004
8005 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8006 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8007 vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
8008 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
8009 kfree(lut);
8010
8011 return ret;
8012 }
8013
8014 /**
8015 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
8016 * @vsi: Pointer to vsi structure
8017  * @seed: Buffer to store the hash keys
8018 * @lut: Buffer to store the lookup table entries
8019 * @lut_size: Size of buffer to store the lookup table entries
8020 *
8021 * Return 0 on success, negative on failure
8022 */
8023 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
8024 u8 *lut, u16 lut_size)
8025 {
8026 struct i40e_pf *pf = vsi->back;
8027 struct i40e_hw *hw = &pf->hw;
8028 int ret = 0;
8029
8030 if (seed) {
8031 ret = i40e_aq_get_rss_key(hw, vsi->id,
8032 (struct i40e_aqc_get_set_rss_key_data *)seed);
8033 if (ret) {
8034 dev_info(&pf->pdev->dev,
8035 "Cannot get RSS key, err %s aq_err %s\n",
8036 i40e_stat_str(&pf->hw, ret),
8037 i40e_aq_str(&pf->hw,
8038 pf->hw.aq.asq_last_status));
8039 return ret;
8040 }
8041 }
8042
8043 if (lut) {
8044 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
8045
8046 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
8047 if (ret) {
8048 dev_info(&pf->pdev->dev,
8049 "Cannot get RSS lut, err %s aq_err %s\n",
8050 i40e_stat_str(&pf->hw, ret),
8051 i40e_aq_str(&pf->hw,
8052 pf->hw.aq.asq_last_status));
8053 return ret;
8054 }
8055 }
8056
8057 return ret;
8058 }
8059
8060 /**
8061 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
8062 * @vsi: Pointer to vsi structure
8063 * @seed: RSS hash seed
8064 * @lut: Lookup table
8065 * @lut_size: Lookup table size
8066 *
8067 * Returns 0 on success, negative on failure
8068 **/
8069 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
8070 const u8 *lut, u16 lut_size)
8071 {
8072 struct i40e_pf *pf = vsi->back;
8073 struct i40e_hw *hw = &pf->hw;
8074 u16 vf_id = vsi->vf_id;
8075 u8 i;
8076
8077 /* Fill out hash function seed */
8078 if (seed) {
8079 u32 *seed_dw = (u32 *)seed;
8080
8081 if (vsi->type == I40E_VSI_MAIN) {
8082 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8083 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
8084 seed_dw[i]);
8085 } else if (vsi->type == I40E_VSI_SRIOV) {
8086 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
8087 i40e_write_rx_ctl(hw,
8088 I40E_VFQF_HKEY1(i, vf_id),
8089 seed_dw[i]);
8090 } else {
8091 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
8092 }
8093 }
8094
8095 if (lut) {
8096 u32 *lut_dw = (u32 *)lut;
8097
8098 if (vsi->type == I40E_VSI_MAIN) {
8099 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8100 return -EINVAL;
8101 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8102 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
8103 } else if (vsi->type == I40E_VSI_SRIOV) {
8104 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
8105 return -EINVAL;
8106 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
8107 i40e_write_rx_ctl(hw,
8108 I40E_VFQF_HLUT1(i, vf_id),
8109 lut_dw[i]);
8110 } else {
8111 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
8112 }
8113 }
8114 i40e_flush(hw);
8115
8116 return 0;
8117 }
8118
8119 /**
8120 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
8121 * @vsi: Pointer to VSI structure
8122 * @seed: Buffer to store the keys
8123 * @lut: Buffer to store the lookup table entries
8124 * @lut_size: Size of buffer to store the lookup table entries
8125 *
8126 * Returns 0 on success, negative on failure
8127 */
8128 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
8129 u8 *lut, u16 lut_size)
8130 {
8131 struct i40e_pf *pf = vsi->back;
8132 struct i40e_hw *hw = &pf->hw;
8133 u16 i;
8134
8135 if (seed) {
8136 u32 *seed_dw = (u32 *)seed;
8137
8138 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
8139 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
8140 }
8141 if (lut) {
8142 u32 *lut_dw = (u32 *)lut;
8143
8144 if (lut_size != I40E_HLUT_ARRAY_SIZE)
8145 return -EINVAL;
8146 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
8147 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
8148 }
8149
8150 return 0;
8151 }
8152
8153 /**
8154 * i40e_config_rss - Configure RSS keys and lut
8155 * @vsi: Pointer to VSI structure
8156 * @seed: RSS hash seed
8157 * @lut: Lookup table
8158 * @lut_size: Lookup table size
8159 *
8160 * Returns 0 on success, negative on failure
8161 */
8162 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8163 {
8164 struct i40e_pf *pf = vsi->back;
8165
8166 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8167 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
8168 else
8169 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
8170 }
8171
8172 /**
8173 * i40e_get_rss - Get RSS keys and lut
8174 * @vsi: Pointer to VSI structure
8175 * @seed: Buffer to store the keys
8176 * @lut: Buffer to store the lookup table entries
8177  * @lut_size: Size of buffer to store the lookup table entries
8178 *
8179 * Returns 0 on success, negative on failure
8180 */
8181 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
8182 {
8183 struct i40e_pf *pf = vsi->back;
8184
8185 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
8186 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
8187 else
8188 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
8189 }
8190
8191 /**
8192 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
8193 * @pf: Pointer to board private structure
8194 * @lut: Lookup table
8195 * @rss_table_size: Lookup table size
8196 * @rss_size: Range of queue number for hashing
8197 */
8198 static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
8199 u16 rss_table_size, u16 rss_size)
8200 {
8201 u16 i;
8202
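/* simple round-robin fill: with rss_size == 4, for example, the LUT
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... across rss_table_size entries
 */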
8203 for (i = 0; i < rss_table_size; i++)
8204 lut[i] = i % rss_size;
8205 }
8206
8207 /**
8208 * i40e_pf_config_rss - Prepare for RSS if used
8209 * @pf: board private structure
8210 **/
8211 static int i40e_pf_config_rss(struct i40e_pf *pf)
8212 {
8213 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8214 u8 seed[I40E_HKEY_ARRAY_SIZE];
8215 u8 *lut;
8216 struct i40e_hw *hw = &pf->hw;
8217 u32 reg_val;
8218 u64 hena;
8219 int ret;
8220
8221 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
8222 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
8223 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
8224 hena |= i40e_pf_get_default_rss_hena(pf);
8225
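/* hena is a 64-bit bitmap of enabled hash packet types, split across
 * the two 32-bit HENA registers
 */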
8226 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
8227 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
8228
8229 /* Determine the RSS table size based on the hardware capabilities */
8230 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
8231 reg_val = (pf->rss_table_size == 512) ?
8232 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
8233 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
8234 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
8235
8236 /* Determine the RSS size of the VSI */
8237 if (!vsi->rss_size)
8238 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8239 vsi->num_queue_pairs);
8240
8241 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
8242 if (!lut)
8243 return -ENOMEM;
8244
8245 /* Use user configured lut if there is one, otherwise use default */
8246 if (vsi->rss_lut_user)
8247 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
8248 else
8249 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
8250
8251 /* Use user configured hash key if there is one, otherwise
8252 * use default.
8253 */
8254 if (vsi->rss_hkey_user)
8255 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
8256 else
8257 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
8258 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
8259 kfree(lut);
8260
8261 return ret;
8262 }
8263
8264 /**
8265 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
8266 * @pf: board private structure
8267 * @queue_count: the requested queue count for rss.
8268 *
8269  * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
8270  * count, which may be different from the requested queue count.
8271 **/
8272 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
8273 {
8274 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8275 int new_rss_size;
8276
8277 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
8278 return 0;
8279
8280 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8281
8282 if (queue_count != vsi->num_queue_pairs) {
8283 vsi->req_queue_pairs = queue_count;
8284 i40e_prep_for_reset(pf);
8285
8286 pf->alloc_rss_size = new_rss_size;
8287
8288 i40e_reset_and_rebuild(pf, true);
8289
8290 /* Discard the user configured hash keys and lut, if less
8291 * queues are enabled.
8292 */
8293 if (queue_count < vsi->rss_size) {
8294 i40e_clear_rss_config_user(vsi);
8295 dev_dbg(&pf->pdev->dev,
8296 "discard user configured hash keys and lut\n");
8297 }
8298
8299 /* Reset vsi->rss_size, as number of enabled queues changed */
8300 vsi->rss_size = min_t(int, pf->alloc_rss_size,
8301 vsi->num_queue_pairs);
8302
8303 i40e_pf_config_rss(pf);
8304 }
8305 dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
8306 pf->alloc_rss_size, pf->rss_size_max);
8307 return pf->alloc_rss_size;
8308 }
8309
8310 /**
8311 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8312 * @pf: board private structure
8313 **/
8314 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8315 {
8316 i40e_status status;
8317 bool min_valid, max_valid;
8318 u32 max_bw, min_bw;
8319
8320 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8321 &min_valid, &max_valid);
8322
8323 if (!status) {
8324 if (min_valid)
8325 pf->npar_min_bw = min_bw;
8326 if (max_valid)
8327 pf->npar_max_bw = max_bw;
8328 }
8329
8330 return status;
8331 }
8332
8333 /**
8334 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8335 * @pf: board private structure
8336 **/
8337 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8338 {
8339 struct i40e_aqc_configure_partition_bw_data bw_data;
8340 i40e_status status;
8341
8342 /* Set the valid bit for this PF */
8343 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8344 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8345 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8346
8347 /* Set the new bandwidths */
8348 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8349
8350 return status;
8351 }
8352
8353 /**
8354 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
8355 * @pf: board private structure
8356 **/
8357 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
8358 {
8359 /* Commit temporary BW setting to permanent NVM image */
8360 enum i40e_admin_queue_err last_aq_status;
8361 i40e_status ret;
8362 u16 nvm_word;
8363
8364 if (pf->hw.partition_id != 1) {
8365 dev_info(&pf->pdev->dev,
8366 "Commit BW only works on partition 1! This is partition %d",
8367 pf->hw.partition_id);
8368 ret = I40E_NOT_SUPPORTED;
8369 goto bw_commit_out;
8370 }
8371
8372 /* Acquire NVM for read access */
8373 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
8374 last_aq_status = pf->hw.aq.asq_last_status;
8375 if (ret) {
8376 dev_info(&pf->pdev->dev,
8377 "Cannot acquire NVM for read access, err %s aq_err %s\n",
8378 i40e_stat_str(&pf->hw, ret),
8379 i40e_aq_str(&pf->hw, last_aq_status));
8380 goto bw_commit_out;
8381 }
8382
8383 /* Read word 0x10 of NVM - SW compatibility word 1 */
8384 ret = i40e_aq_read_nvm(&pf->hw,
8385 I40E_SR_NVM_CONTROL_WORD,
8386 0x10, sizeof(nvm_word), &nvm_word,
8387 false, NULL);
8388 /* Save off last admin queue command status before releasing
8389 * the NVM
8390 */
8391 last_aq_status = pf->hw.aq.asq_last_status;
8392 i40e_release_nvm(&pf->hw);
8393 if (ret) {
8394 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
8395 i40e_stat_str(&pf->hw, ret),
8396 i40e_aq_str(&pf->hw, last_aq_status));
8397 goto bw_commit_out;
8398 }
8399
8400 /* Wait a bit for NVM release to complete */
8401 msleep(50);
8402
8403 /* Acquire NVM for write access */
8404 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
8405 last_aq_status = pf->hw.aq.asq_last_status;
8406 if (ret) {
8407 dev_info(&pf->pdev->dev,
8408 "Cannot acquire NVM for write access, err %s aq_err %s\n",
8409 i40e_stat_str(&pf->hw, ret),
8410 i40e_aq_str(&pf->hw, last_aq_status));
8411 goto bw_commit_out;
8412 }
8413 /* Write it back out unchanged to initiate update NVM,
8414 * which will force a write of the shadow (alt) RAM to
8415 * the NVM - thus storing the bandwidth values permanently.
8416 */
8417 ret = i40e_aq_update_nvm(&pf->hw,
8418 I40E_SR_NVM_CONTROL_WORD,
8419 0x10, sizeof(nvm_word),
8420 &nvm_word, true, NULL);
8421 /* Save off last admin queue command status before releasing
8422 * the NVM
8423 */
8424 last_aq_status = pf->hw.aq.asq_last_status;
8425 i40e_release_nvm(&pf->hw);
8426 if (ret)
8427 dev_info(&pf->pdev->dev,
8428 "BW settings NOT SAVED, err %s aq_err %s\n",
8429 i40e_stat_str(&pf->hw, ret),
8430 i40e_aq_str(&pf->hw, last_aq_status));
8431 bw_commit_out:
8432
8433 return ret;
8434 }
8435
8436 /**
8437 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
8438 * @pf: board private structure to initialize
8439 *
8440 * i40e_sw_init initializes the Adapter private data structure.
8441 * Fields are initialized based on PCI device information and
8442 * OS network device settings (MTU size).
8443 **/
8444 static int i40e_sw_init(struct i40e_pf *pf)
8445 {
8446 int err = 0;
8447 int size;
8448
8449 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
8450 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
8451 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
8452 if (I40E_DEBUG_USER & debug)
8453 pf->hw.debug_mask = debug;
8454 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
8455 I40E_DEFAULT_MSG_ENABLE);
8456 }
8457
8458 /* Set default capability flags */
8459 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
8460 I40E_FLAG_MSI_ENABLED |
8461 I40E_FLAG_MSIX_ENABLED;
8462
8463 if (iommu_present(&pci_bus_type))
8464 pf->flags |= I40E_FLAG_RX_PS_ENABLED;
8465 else
8466 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
8467
8468 /* Set default ITR */
8469 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
8470 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
8471
8472 /* Depending on PF configurations, it is possible that the RSS
8473 * maximum might end up larger than the available queues
8474 */
8475 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
8476 pf->alloc_rss_size = 1;
8477 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
8478 pf->rss_size_max = min_t(int, pf->rss_size_max,
8479 pf->hw.func_caps.num_tx_qp);
8480 if (pf->hw.func_caps.rss) {
8481 pf->flags |= I40E_FLAG_RSS_ENABLED;
8482 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
8483 num_online_cpus());
8484 }
8485
8486 /* MFP mode enabled */
8487 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
8488 pf->flags |= I40E_FLAG_MFP_ENABLED;
8489 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
8490 if (i40e_get_npar_bw_setting(pf))
8491 dev_warn(&pf->pdev->dev,
8492 "Could not get NPAR bw settings\n");
8493 else
8494 dev_info(&pf->pdev->dev,
8495 "Min BW = %8.8x, Max BW = %8.8x\n",
8496 pf->npar_min_bw, pf->npar_max_bw);
8497 }
8498
8499 /* FW/NVM is not yet fixed in this regard */
8500 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
8501 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
8502 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8503 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
8504 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
8505 pf->hw.num_partitions > 1)
8506 dev_info(&pf->pdev->dev,
8507 "Flow Director Sideband mode Disabled in MFP mode\n");
8508 else
8509 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8510 pf->fdir_pf_filter_count =
8511 pf->hw.func_caps.fd_filters_guaranteed;
8512 pf->hw.fdir_shared_filter_count =
8513 pf->hw.func_caps.fd_filters_best_effort;
8514 }
8515
8516 if (i40e_is_mac_710(&pf->hw) &&
8517 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
8518 (pf->hw.aq.fw_maj_ver < 4))) {
8519 pf->flags |= I40E_FLAG_RESTART_AUTONEG;
8520 /* No DCB support for FW < v4.33 */
8521 pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
8522 }
8523
8524 /* Disable FW LLDP if FW < v4.3 */
8525 if (i40e_is_mac_710(&pf->hw) &&
8526 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
8527 (pf->hw.aq.fw_maj_ver < 4)))
8528 pf->flags |= I40E_FLAG_STOP_FW_LLDP;
8529
8530 /* Use the FW Set LLDP MIB API if FW > v4.40 */
8531 if (i40e_is_mac_710(&pf->hw) &&
8532 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
8533 (pf->hw.aq.fw_maj_ver >= 5)))
8534 pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
8535
8536 if (pf->hw.func_caps.vmdq) {
8537 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
8538 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
8539 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
8540 }
8541
8542 if (pf->hw.func_caps.iwarp) {
8543 pf->flags |= I40E_FLAG_IWARP_ENABLED;
8544 /* IWARP needs one extra vector for CQP just like MISC. */
8545 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
8546 }
8547
8548 #ifdef I40E_FCOE
8549 i40e_init_pf_fcoe(pf);
8550
8551 #endif /* I40E_FCOE */
8552 #ifdef CONFIG_PCI_IOV
8553 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
8554 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
8555 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
8556 pf->num_req_vfs = min_t(int,
8557 pf->hw.func_caps.num_vfs,
8558 I40E_MAX_VF_COUNT);
8559 }
8560 #endif /* CONFIG_PCI_IOV */
8561 if (pf->hw.mac.type == I40E_MAC_X722) {
8562 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
8563 I40E_FLAG_128_QP_RSS_CAPABLE |
8564 I40E_FLAG_HW_ATR_EVICT_CAPABLE |
8565 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
8566 I40E_FLAG_WB_ON_ITR_CAPABLE |
8567 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
8568 I40E_FLAG_NO_PCI_LINK_CHECK |
8569 I40E_FLAG_100M_SGMII_CAPABLE |
8570 I40E_FLAG_USE_SET_LLDP_MIB |
8571 I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8572 } else if ((pf->hw.aq.api_maj_ver > 1) ||
8573 ((pf->hw.aq.api_maj_ver == 1) &&
8574 (pf->hw.aq.api_min_ver > 4))) {
8575 /* Supported in FW API version higher than 1.4 */
8576 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
8577 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8578 } else {
8579 pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
8580 }
8581
8582 pf->eeprom_version = 0xDEAD;
8583 pf->lan_veb = I40E_NO_VEB;
8584 pf->lan_vsi = I40E_NO_VSI;
8585
8586 /* By default FW has this off for performance reasons */
8587 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8588
8589 /* set up queue assignment tracking */
8590 size = sizeof(struct i40e_lump_tracking)
8591 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8592 pf->qp_pile = kzalloc(size, GFP_KERNEL);
8593 if (!pf->qp_pile) {
8594 err = -ENOMEM;
8595 goto sw_init_done;
8596 }
8597 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8598 pf->qp_pile->search_hint = 0;
8599
8600 pf->tx_timeout_recovery_level = 1;
8601
8602 mutex_init(&pf->switch_mutex);
8603
8604 /* If NPAR is enabled nudge the Tx scheduler */
8605 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8606 i40e_set_npar_bw_setting(pf);
8607
8608 sw_init_done:
8609 return err;
8610 }
8611
8612 /**
8613 * i40e_set_ntuple - set the ntuple feature flag and take action
8614 * @pf: board private structure to initialize
8615 * @features: the feature set that the stack is suggesting
8616 *
8617 * returns a bool to indicate if reset needs to happen
8618 **/
8619 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8620 {
8621 bool need_reset = false;
8622
8623 /* Check if Flow Director n-tuple support was enabled or disabled. If
8624 * the state changed, we need to reset.
8625 */
8626 if (features & NETIF_F_NTUPLE) {
8627 /* Enable filters and mark for reset */
8628 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8629 need_reset = true;
8630 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8631 } else {
8632 /* turn off filters, mark for reset and clear SW filter list */
8633 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8634 need_reset = true;
8635 i40e_fdir_filter_exit(pf);
8636 }
8637 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8638 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8639 /* reset fd counters */
8640 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8641 pf->fdir_pf_active_filters = 0;
8642 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8643 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8644 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8645 /* if ATR was auto disabled it can be re-enabled. */
8646 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8647 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8648 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8649 }
8650 return need_reset;
8651 }
8652
8653 /**
8654 * i40e_set_features - set the netdev feature flags
8655 * @netdev: ptr to the netdev being adjusted
8656 * @features: the feature set that the stack is suggesting
8657 **/
8658 static int i40e_set_features(struct net_device *netdev,
8659 netdev_features_t features)
8660 {
8661 struct i40e_netdev_priv *np = netdev_priv(netdev);
8662 struct i40e_vsi *vsi = np->vsi;
8663 struct i40e_pf *pf = vsi->back;
8664 bool need_reset;
8665
8666 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8667 i40e_vlan_stripping_enable(vsi);
8668 else
8669 i40e_vlan_stripping_disable(vsi);
8670
8671 need_reset = i40e_set_ntuple(pf, features);
8672
8673 if (need_reset)
8674 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8675
8676 return 0;
8677 }
8678
8679 #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
8680 /**
8681  * i40e_get_udp_port_idx - Look up a UDP port possibly offloaded for Rx
8682 * @pf: board private structure
8683 * @port: The UDP port to look up
8684 *
8685 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8686 **/
8687 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
8688 {
8689 u8 i;
8690
8691 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8692 if (pf->udp_ports[i].index == port)
8693 return i;
8694 }
8695
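/* not found: i == I40E_MAX_PF_UDP_OFFLOAD_PORTS. Callers pass
 * port == 0 to locate the first free slot, since unused entries
 * keep an index of 0.
 */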
8696 return i;
8697 }
8698
8699 #endif
8700
8701 #if IS_ENABLED(CONFIG_VXLAN)
8702 /**
8703 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8704 * @netdev: This physical port's netdev
8705 * @sa_family: Socket Family that VXLAN is notifying us about
8706 * @port: New UDP port number that VXLAN started listening to
8707 **/
8708 static void i40e_add_vxlan_port(struct net_device *netdev,
8709 sa_family_t sa_family, __be16 port)
8710 {
8711 struct i40e_netdev_priv *np = netdev_priv(netdev);
8712 struct i40e_vsi *vsi = np->vsi;
8713 struct i40e_pf *pf = vsi->back;
8714 u8 next_idx;
8715 u8 idx;
8716
8717 idx = i40e_get_udp_port_idx(pf, port);
8718
8719 /* Check if port already exists */
8720 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8721 netdev_info(netdev, "vxlan port %d already offloaded\n",
8722 ntohs(port));
8723 return;
8724 }
8725
8726 /* Now check if there is space to add the new port */
8727 next_idx = i40e_get_udp_port_idx(pf, 0);
8728
8729 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8730 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8731 ntohs(port));
8732 return;
8733 }
8734
8735 /* New port: add it and mark its index in the bitmap */
8736 pf->udp_ports[next_idx].index = port;
8737 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
8738 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8739 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
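/* the hardware is not touched here; the service task notices
 * I40E_FLAG_UDP_FILTER_SYNC and programs the pending ports via the
 * admin queue in its UDP filter sync subtask
 */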
8740 }
8741
8742 /**
8743 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8744 * @netdev: This physical port's netdev
8745 * @sa_family: Socket Family that VXLAN is notifying us about
8746 * @port: UDP port number that VXLAN stopped listening to
8747 **/
8748 static void i40e_del_vxlan_port(struct net_device *netdev,
8749 sa_family_t sa_family, __be16 port)
8750 {
8751 struct i40e_netdev_priv *np = netdev_priv(netdev);
8752 struct i40e_vsi *vsi = np->vsi;
8753 struct i40e_pf *pf = vsi->back;
8754 u8 idx;
8755
8756 idx = i40e_get_udp_port_idx(pf, port);
8757
8758 /* Check if port already exists */
8759 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8760 /* if port exists, set it to 0 (mark for deletion)
8761 * and make it pending
8762 */
8763 pf->udp_ports[idx].index = 0;
8764 pf->pending_udp_bitmap |= BIT_ULL(idx);
8765 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8766 } else {
8767 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8768 ntohs(port));
8769 }
8770 }
8771 #endif
8772
8773 #if IS_ENABLED(CONFIG_GENEVE)
8774 /**
8775 * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
8776 * @netdev: This physical port's netdev
8777 * @sa_family: Socket Family that GENEVE is notifying us about
8778 * @port: New UDP port number that GENEVE started listening to
8779 **/
8780 static void i40e_add_geneve_port(struct net_device *netdev,
8781 sa_family_t sa_family, __be16 port)
8782 {
8783 struct i40e_netdev_priv *np = netdev_priv(netdev);
8784 struct i40e_vsi *vsi = np->vsi;
8785 struct i40e_pf *pf = vsi->back;
8786 u8 next_idx;
8787 u8 idx;
8788
8789 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8790 return;
8791
8792 idx = i40e_get_udp_port_idx(pf, port);
8793
8794 /* Check if port already exists */
8795 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8796 netdev_info(netdev, "udp port %d already offloaded\n",
8797 ntohs(port));
8798 return;
8799 }
8800
8801 /* Now check if there is space to add the new port */
8802 next_idx = i40e_get_udp_port_idx(pf, 0);
8803
8804 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8805 netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
8806 ntohs(port));
8807 return;
8808 }
8809
8810 /* New port: add it and mark its index in the bitmap */
8811 pf->udp_ports[next_idx].index = port;
8812 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
8813 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
8814 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8815
8816 dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
8817 }
8818
8819 /**
8820 * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
8821 * @netdev: This physical port's netdev
8822 * @sa_family: Socket Family that GENEVE is notifying us about
8823 * @port: UDP port number that GENEVE stopped listening to
8824 **/
8825 static void i40e_del_geneve_port(struct net_device *netdev,
8826 sa_family_t sa_family, __be16 port)
8827 {
8828 struct i40e_netdev_priv *np = netdev_priv(netdev);
8829 struct i40e_vsi *vsi = np->vsi;
8830 struct i40e_pf *pf = vsi->back;
8831 u8 idx;
8832
8833 if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
8834 return;
8835
8836 idx = i40e_get_udp_port_idx(pf, port);
8837
8838 /* Check if port already exists */
8839 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8840 /* if port exists, set it to 0 (mark for deletion)
8841 * and make it pending
8842 */
8843 pf->udp_ports[idx].index = 0;
8844 pf->pending_udp_bitmap |= BIT_ULL(idx);
8845 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
8846
8847 dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
8848 ntohs(port));
8849 } else {
8850 netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
8851 ntohs(port));
8852 }
8853 }
8854 #endif
8855
8856 static int i40e_get_phys_port_id(struct net_device *netdev,
8857 struct netdev_phys_item_id *ppid)
8858 {
8859 struct i40e_netdev_priv *np = netdev_priv(netdev);
8860 struct i40e_pf *pf = np->vsi->back;
8861 struct i40e_hw *hw = &pf->hw;
8862
8863 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8864 return -EOPNOTSUPP;
8865
8866 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8867 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8868
8869 return 0;
8870 }
8871
8872 /**
8873 * i40e_ndo_fdb_add - add an entry to the hardware database
8874 * @ndm: the input from the stack
8875 * @tb: pointer to array of nladdr (unused)
8876 * @dev: the net device pointer
8877  * @addr: the MAC address entry being added
 * @vid: VLAN ID
8878  * @flags: instructions from stack about fdb operation
8879 */
8880 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8881 struct net_device *dev,
8882 const unsigned char *addr, u16 vid,
8883 u16 flags)
8884 {
8885 struct i40e_netdev_priv *np = netdev_priv(dev);
8886 struct i40e_pf *pf = np->vsi->back;
8887 int err = 0;
8888
8889 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8890 return -EOPNOTSUPP;
8891
8892 if (vid) {
8893 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8894 return -EINVAL;
8895 }
8896
8897 /* Hardware does not support aging addresses so if a
8898 * ndm_state is given only allow permanent addresses
8899 */
8900 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8901 netdev_info(dev, "FDB only supports static addresses\n");
8902 return -EINVAL;
8903 }
8904
8905 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8906 err = dev_uc_add_excl(dev, addr);
8907 else if (is_multicast_ether_addr(addr))
8908 err = dev_mc_add_excl(dev, addr);
8909 else
8910 err = -EINVAL;
8911
8912 /* Only return duplicate errors if NLM_F_EXCL is set */
8913 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8914 err = 0;
8915
8916 return err;
8917 }
8918
8919 /**
8920 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8921 * @dev: the netdev being configured
8922 * @nlh: RTNL message
8923 *
8924 * Inserts a new hardware bridge if not already created and
8925 * enables the bridging mode requested (VEB or VEPA). If the
8926 * hardware bridge has already been inserted and the request
8927 * is to change the mode then that requires a PF reset to
8928 * allow rebuild of the components with required hardware
8929 * bridge mode enabled.
8930 **/
8931 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8932 struct nlmsghdr *nlh,
8933 u16 flags)
8934 {
8935 struct i40e_netdev_priv *np = netdev_priv(dev);
8936 struct i40e_vsi *vsi = np->vsi;
8937 struct i40e_pf *pf = vsi->back;
8938 struct i40e_veb *veb = NULL;
8939 struct nlattr *attr, *br_spec;
8940 int i, rem;
8941
8942 /* Only for PF VSI for now */
8943 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8944 return -EOPNOTSUPP;
8945
8946 /* Find the HW bridge for PF VSI */
8947 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8948 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8949 veb = pf->veb[i];
8950 }
8951
8952 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8953
8954 nla_for_each_nested(attr, br_spec, rem) {
8955 __u16 mode;
8956
8957 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8958 continue;
8959
8960 mode = nla_get_u16(attr);
8961 if ((mode != BRIDGE_MODE_VEPA) &&
8962 (mode != BRIDGE_MODE_VEB))
8963 return -EINVAL;
8964
8965 /* Insert a new HW bridge */
8966 if (!veb) {
8967 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8968 vsi->tc_config.enabled_tc);
8969 if (veb) {
8970 veb->bridge_mode = mode;
8971 i40e_config_bridge_mode(veb);
8972 } else {
8973 /* No Bridge HW offload available */
8974 return -ENOENT;
8975 }
8976 break;
8977 } else if (mode != veb->bridge_mode) {
8978 /* Existing HW bridge but different mode needs reset */
8979 veb->bridge_mode = mode;
8980 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8981 if (mode == BRIDGE_MODE_VEB)
8982 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8983 else
8984 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8985 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8986 break;
8987 }
8988 }
8989
8990 return 0;
8991 }
8992
8993 /**
8994 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8995 * @skb: skb buff
8996 * @pid: process id
8997 * @seq: RTNL message seq #
8998 * @dev: the netdev being configured
8999 * @filter_mask: unused
9000 * @nlflags: netlink flags passed in
9001 *
9002  * Return the mode in which the hardware bridge is operating,
9003  * i.e. VEB or VEPA.
9004 **/
9005 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9006 struct net_device *dev,
9007 u32 __always_unused filter_mask,
9008 int nlflags)
9009 {
9010 struct i40e_netdev_priv *np = netdev_priv(dev);
9011 struct i40e_vsi *vsi = np->vsi;
9012 struct i40e_pf *pf = vsi->back;
9013 struct i40e_veb *veb = NULL;
9014 int i;
9015
9016 /* Only for PF VSI for now */
9017 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
9018 return -EOPNOTSUPP;
9019
9020 /* Find the HW bridge for the PF VSI */
9021 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9022 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9023 veb = pf->veb[i];
9024 }
9025
9026 if (!veb)
9027 return 0;
9028
9029 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
9030 nlflags, 0, 0, filter_mask, NULL);
9031 }
9032
9033 /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
9034 * inner mac plus all inner ethertypes.
9035 */
9036 #define I40E_MAX_TUNNEL_HDR_LEN 128
9037 /**
9038 * i40e_features_check - Validate encapsulated packet conforms to limits
9039 * @skb: skb buff
9040 * @dev: This physical port's netdev
9041 * @features: Offload features that the stack believes apply
9042 **/
9043 static netdev_features_t i40e_features_check(struct sk_buff *skb,
9044 struct net_device *dev,
9045 netdev_features_t features)
9046 {
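/* if the tunnel headers are longer than the hardware can parse,
 * strip the checksum and GSO offload bits so the stack falls back
 * to software for this skb
 */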
9047 if (skb->encapsulation &&
9048 ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
9049 I40E_MAX_TUNNEL_HDR_LEN))
9050 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
9051
9052 return features;
9053 }
9054
9055 static const struct net_device_ops i40e_netdev_ops = {
9056 .ndo_open = i40e_open,
9057 .ndo_stop = i40e_close,
9058 .ndo_start_xmit = i40e_lan_xmit_frame,
9059 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
9060 .ndo_set_rx_mode = i40e_set_rx_mode,
9061 .ndo_validate_addr = eth_validate_addr,
9062 .ndo_set_mac_address = i40e_set_mac,
9063 .ndo_change_mtu = i40e_change_mtu,
9064 .ndo_do_ioctl = i40e_ioctl,
9065 .ndo_tx_timeout = i40e_tx_timeout,
9066 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
9067 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
9068 #ifdef CONFIG_NET_POLL_CONTROLLER
9069 .ndo_poll_controller = i40e_netpoll,
9070 #endif
9071 .ndo_setup_tc = __i40e_setup_tc,
9072 #ifdef I40E_FCOE
9073 .ndo_fcoe_enable = i40e_fcoe_enable,
9074 .ndo_fcoe_disable = i40e_fcoe_disable,
9075 #endif
9076 .ndo_set_features = i40e_set_features,
9077 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
9078 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
9079 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
9080 .ndo_get_vf_config = i40e_ndo_get_vf_config,
9081 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
9082 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
9083 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
9084 #if IS_ENABLED(CONFIG_VXLAN)
9085 .ndo_add_vxlan_port = i40e_add_vxlan_port,
9086 .ndo_del_vxlan_port = i40e_del_vxlan_port,
9087 #endif
9088 #if IS_ENABLED(CONFIG_GENEVE)
9089 .ndo_add_geneve_port = i40e_add_geneve_port,
9090 .ndo_del_geneve_port = i40e_del_geneve_port,
9091 #endif
9092 .ndo_get_phys_port_id = i40e_get_phys_port_id,
9093 .ndo_fdb_add = i40e_ndo_fdb_add,
9094 .ndo_features_check = i40e_features_check,
9095 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
9096 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
9097 };
9098
9099 /**
9100 * i40e_config_netdev - Setup the netdev flags
9101 * @vsi: the VSI being configured
9102 *
9103 * Returns 0 on success, negative value on failure
9104 **/
9105 static int i40e_config_netdev(struct i40e_vsi *vsi)
9106 {
9107 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
9108 struct i40e_pf *pf = vsi->back;
9109 struct i40e_hw *hw = &pf->hw;
9110 struct i40e_netdev_priv *np;
9111 struct net_device *netdev;
9112 u8 mac_addr[ETH_ALEN];
9113 int etherdev_size;
9114
9115 etherdev_size = sizeof(struct i40e_netdev_priv);
9116 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
9117 if (!netdev)
9118 return -ENOMEM;
9119
9120 vsi->netdev = netdev;
9121 np = netdev_priv(netdev);
9122 np->vsi = vsi;
9123
9124 netdev->hw_enc_features |= NETIF_F_SG |
9125 NETIF_F_IP_CSUM |
9126 NETIF_F_IPV6_CSUM |
9127 NETIF_F_HIGHDMA |
9128 NETIF_F_SOFT_FEATURES |
9129 NETIF_F_TSO |
9130 NETIF_F_TSO_ECN |
9131 NETIF_F_TSO6 |
9132 NETIF_F_GSO_GRE |
9133 NETIF_F_GSO_IPIP |
9134 NETIF_F_GSO_SIT |
9135 NETIF_F_GSO_UDP_TUNNEL |
9136 NETIF_F_GSO_UDP_TUNNEL_CSUM |
9137 NETIF_F_SCTP_CRC |
9138 NETIF_F_RXHASH |
9139 NETIF_F_RXCSUM |
9140 0;
9141
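/* the XOR below clears NETIF_F_GSO_UDP_TUNNEL_CSUM (set just above)
 * when the hardware cannot offload the outer UDP checksum
 */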
9142 if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
9143 netdev->hw_enc_features ^= NETIF_F_GSO_UDP_TUNNEL_CSUM;
9144
9145 /* record features VLANs can make use of */
9146 netdev->vlan_features |= netdev->hw_enc_features;
9147
9148 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
9149 netdev->hw_features |= NETIF_F_NTUPLE;
9150
9151 netdev->hw_features |= netdev->hw_enc_features |
9152 NETIF_F_HW_VLAN_CTAG_TX |
9153 NETIF_F_HW_VLAN_CTAG_RX;
9154
9155 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
9156
9157 if (vsi->type == I40E_VSI_MAIN) {
9158 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9159 ether_addr_copy(mac_addr, hw->mac.perm_addr);
9160 /* The following steps are necessary to prevent reception
9161 * of tagged packets - some older NVM configurations load a
9162  * default MAC-VLAN filter that accepts any tagged packet
9163 * which must be replaced by a normal filter.
9164 */
9165 if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
9166 spin_lock_bh(&vsi->mac_filter_list_lock);
9167 i40e_add_filter(vsi, mac_addr,
9168 I40E_VLAN_ANY, false, true);
9169 spin_unlock_bh(&vsi->mac_filter_list_lock);
9170 }
9171 } else {
9178 /* relate the VSI_VMDQ name to the VSI_MAIN name */
9179 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
9180 pf->vsi[pf->lan_vsi]->netdev->name);
9181 random_ether_addr(mac_addr);
9182
9183 spin_lock_bh(&vsi->mac_filter_list_lock);
9184 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
9185 spin_unlock_bh(&vsi->mac_filter_list_lock);
9186 }
9187
9188 spin_lock_bh(&vsi->mac_filter_list_lock);
9189 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
9190 spin_unlock_bh(&vsi->mac_filter_list_lock);
9191
9192 ether_addr_copy(netdev->dev_addr, mac_addr);
9193 ether_addr_copy(netdev->perm_addr, mac_addr);
9194
9195 netdev->priv_flags |= IFF_UNICAST_FLT;
9196 netdev->priv_flags |= IFF_SUPP_NOFCS;
9197 /* Setup netdev TC information */
9198 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
9199
9200 netdev->netdev_ops = &i40e_netdev_ops;
9201 netdev->watchdog_timeo = 5 * HZ;
9202 i40e_set_ethtool_ops(netdev);
9203 #ifdef I40E_FCOE
9204 i40e_fcoe_config_netdev(netdev, vsi);
9205 #endif
9206
9207 return 0;
9208 }
9209
9210 /**
9211 * i40e_vsi_delete - Delete a VSI from the switch
9212 * @vsi: the VSI being removed
9215 **/
9216 static void i40e_vsi_delete(struct i40e_vsi *vsi)
9217 {
9218 /* removing the default VSI is not allowed */
9219 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
9220 return;
9221
9222 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
9223 }
9224
9225 /**
9226 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
9227 * @vsi: the VSI being queried
9228 *
9229 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
9230 **/
9231 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
9232 {
9233 struct i40e_veb *veb;
9234 struct i40e_pf *pf = vsi->back;
9235
9236 /* Uplink is not a bridge so default to VEB */
9237 if (vsi->veb_idx == I40E_NO_VEB)
9238 return 1;
9239
9240 veb = pf->veb[vsi->veb_idx];
9241 if (!veb) {
9242 dev_info(&pf->pdev->dev,
9243 "There is no veb associated with the bridge\n");
9244 return -ENOENT;
9245 }
9246
9247 /* Uplink is a bridge in VEPA mode */
9248 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9249 return 0;
9250
9251 /* Uplink is a bridge in VEB mode */
9252 return 1;
9257 }
9258
9259 /**
9260 * i40e_add_vsi - Add a VSI to the switch
9261 * @vsi: the VSI being configured
9262 *
9263 * This initializes a VSI context depending on the VSI type to be added and
9264 * passes it down to the add_vsi aq command.
9265 **/
9266 static int i40e_add_vsi(struct i40e_vsi *vsi)
9267 {
9268 int ret = -ENODEV;
9269 u8 laa_macaddr[ETH_ALEN];
9270 bool found_laa_mac_filter = false;
9271 struct i40e_pf *pf = vsi->back;
9272 struct i40e_hw *hw = &pf->hw;
9273 struct i40e_vsi_context ctxt;
9274 struct i40e_mac_filter *f, *ftmp;
9275
9276 u8 enabled_tc = 0x1; /* TC0 enabled */
9277 int f_count = 0;
9278
9279 memset(&ctxt, 0, sizeof(ctxt));
9280 switch (vsi->type) {
9281 case I40E_VSI_MAIN:
9282 /* The PF's main VSI is already setup as part of the
9283 * device initialization, so we'll not bother with
9284 * the add_vsi call, but we will retrieve the current
9285 * VSI context.
9286 */
9287 ctxt.seid = pf->main_vsi_seid;
9288 ctxt.pf_num = pf->hw.pf_id;
9289 ctxt.vf_num = 0;
9290 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9291 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9292 if (ret) {
9293 dev_info(&pf->pdev->dev,
9294 "couldn't get PF vsi config, err %s aq_err %s\n",
9295 i40e_stat_str(&pf->hw, ret),
9296 i40e_aq_str(&pf->hw,
9297 pf->hw.aq.asq_last_status));
9298 return -ENOENT;
9299 }
9300 vsi->info = ctxt.info;
9301 vsi->info.valid_sections = 0;
9302
9303 vsi->seid = ctxt.seid;
9304 vsi->id = ctxt.vsi_number;
9305
9306 enabled_tc = i40e_pf_get_tc_map(pf);
9307
9308 /* MFP mode setup queue map and update VSI */
9309 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
9310 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
9311 memset(&ctxt, 0, sizeof(ctxt));
9312 ctxt.seid = pf->main_vsi_seid;
9313 ctxt.pf_num = pf->hw.pf_id;
9314 ctxt.vf_num = 0;
9315 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
9316 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9317 if (ret) {
9318 dev_info(&pf->pdev->dev,
9319 "update vsi failed, err %s aq_err %s\n",
9320 i40e_stat_str(&pf->hw, ret),
9321 i40e_aq_str(&pf->hw,
9322 pf->hw.aq.asq_last_status));
9323 ret = -ENOENT;
9324 goto err;
9325 }
9326 /* update the local VSI info queue map */
9327 i40e_vsi_update_queue_map(vsi, &ctxt);
9328 vsi->info.valid_sections = 0;
9329 } else {
9330 /* Default/Main VSI is only enabled for TC0;
9331 * reconfigure it to enable all TCs that are
9332 * available on the port in SFP mode.
9333 * For MFP case the iSCSI PF would use this
9334 * flow to enable LAN+iSCSI TC.
9335 */
9336 ret = i40e_vsi_config_tc(vsi, enabled_tc);
9337 if (ret) {
9338 dev_info(&pf->pdev->dev,
9339 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
9340 enabled_tc,
9341 i40e_stat_str(&pf->hw, ret),
9342 i40e_aq_str(&pf->hw,
9343 pf->hw.aq.asq_last_status));
9344 ret = -ENOENT;
9345 }
9346 }
9347 break;
9348
9349 case I40E_VSI_FDIR:
9350 ctxt.pf_num = hw->pf_id;
9351 ctxt.vf_num = 0;
9352 ctxt.uplink_seid = vsi->uplink_seid;
9353 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9354 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9355 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
9356 (i40e_is_vsi_uplink_mode_veb(vsi))) {
9357 ctxt.info.valid_sections |=
9358 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9359 ctxt.info.switch_id =
9360 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9361 }
9362 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9363 break;
9364
9365 case I40E_VSI_VMDQ2:
9366 ctxt.pf_num = hw->pf_id;
9367 ctxt.vf_num = 0;
9368 ctxt.uplink_seid = vsi->uplink_seid;
9369 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9370 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
9371
9372 /* This VSI is connected to VEB so the switch_id
9373 * should be set to zero by default.
9374 */
9375 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9376 ctxt.info.valid_sections |=
9377 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9378 ctxt.info.switch_id =
9379 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9380 }
9381
9382 /* Setup the VSI tx/rx queue map for TC0 only for now */
9383 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9384 break;
9385
9386 case I40E_VSI_SRIOV:
9387 ctxt.pf_num = hw->pf_id;
9388 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
9389 ctxt.uplink_seid = vsi->uplink_seid;
9390 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
9391 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
9392
9393 /* This VSI is connected to VEB so the switch_id
9394 * should be set to zero by default.
9395 */
9396 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
9397 ctxt.info.valid_sections |=
9398 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9399 ctxt.info.switch_id =
9400 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9401 }
9402
9403 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
9404 ctxt.info.valid_sections |=
9405 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
9406 ctxt.info.queueing_opt_flags |=
9407 I40E_AQ_VSI_QUE_OPT_TCP_ENA;
9408 }
9409
9410 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
9411 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
9412 if (pf->vf[vsi->vf_id].spoofchk) {
9413 ctxt.info.valid_sections |=
9414 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
9415 ctxt.info.sec_flags |=
9416 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
9417 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
9418 }
9419 /* Setup the VSI tx/rx queue map for TC0 only for now */
9420 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
9421 break;
9422
9423 #ifdef I40E_FCOE
9424 case I40E_VSI_FCOE:
9425 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
9426 if (ret) {
9427 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
9428 return ret;
9429 }
9430 break;
9431
9432 #endif /* I40E_FCOE */
9433 case I40E_VSI_IWARP:
9434 /* send down message to iWARP */
9435 break;
9436
9437 default:
9438 return -ENODEV;
9439 }
9440
9441 if (vsi->type != I40E_VSI_MAIN) {
9442 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
9443 if (ret) {
9444 dev_info(&vsi->back->pdev->dev,
9445 "add vsi failed, err %s aq_err %s\n",
9446 i40e_stat_str(&pf->hw, ret),
9447 i40e_aq_str(&pf->hw,
9448 pf->hw.aq.asq_last_status));
9449 ret = -ENOENT;
9450 goto err;
9451 }
9452 vsi->info = ctxt.info;
9453 vsi->info.valid_sections = 0;
9454 vsi->seid = ctxt.seid;
9455 vsi->id = ctxt.vsi_number;
9456 }
9457
9458 spin_lock_bh(&vsi->mac_filter_list_lock);
9459 /* If macvlan filters already exist, force them to get loaded */
9460 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
9461 f->changed = true;
9462 f_count++;
9463
9464 /* Expected to have only one MAC filter entry for LAA in list */
9465 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
9466 ether_addr_copy(laa_macaddr, f->macaddr);
9467 found_laa_mac_filter = true;
9468 }
9469 }
9470 spin_unlock_bh(&vsi->mac_filter_list_lock);
9471
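/* A locally administered address (LAA) in the filter list means the
 * default MAC was overridden: drop the old perfect-match MACVLAN
 * entry and program the LAA into the hardware. The WRITE_TYPE_LAA_WOL
 * opcode name suggests the address is also used for wake-on-LAN.
 */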
9472 if (found_laa_mac_filter) {
9473 struct i40e_aqc_remove_macvlan_element_data element;
9474
9475 memset(&element, 0, sizeof(element));
9476 ether_addr_copy(element.mac_addr, laa_macaddr);
9477 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
9478 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
9479 &element, 1, NULL);
9480 if (ret) {
9481 /* some older FW has a different default */
9482 element.flags |=
9483 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
9484 i40e_aq_remove_macvlan(hw, vsi->seid,
9485 &element, 1, NULL);
9486 }
9487
9488 i40e_aq_mac_address_write(hw,
9489 I40E_AQC_WRITE_TYPE_LAA_WOL,
9490 laa_macaddr, NULL);
9491 }
9492
9493 if (f_count) {
9494 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
9495 pf->flags |= I40E_FLAG_FILTER_SYNC;
9496 }
9497
9498 /* Update VSI BW information */
9499 ret = i40e_vsi_get_bw_info(vsi);
9500 if (ret) {
9501 dev_info(&pf->pdev->dev,
9502 "couldn't get vsi bw info, err %s aq_err %s\n",
9503 i40e_stat_str(&pf->hw, ret),
9504 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9505 /* VSI is already added so not tearing that up */
9506 ret = 0;
9507 }
9508
9509 err:
9510 return ret;
9511 }
9512
9513 /**
9514 * i40e_vsi_release - Delete a VSI and free its resources
9515 * @vsi: the VSI being removed
9516 *
9517 * Returns 0 on success or < 0 on error
9518 **/
9519 int i40e_vsi_release(struct i40e_vsi *vsi)
9520 {
9521 struct i40e_mac_filter *f, *ftmp;
9522 struct i40e_veb *veb = NULL;
9523 struct i40e_pf *pf;
9524 u16 uplink_seid;
9525 int i, n;
9526
9527 pf = vsi->back;
9528
9529 /* release of a VEB-owner or last VSI is not allowed */
9530 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
9531 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
9532 vsi->seid, vsi->uplink_seid);
9533 return -ENODEV;
9534 }
9535 if (vsi == pf->vsi[pf->lan_vsi] &&
9536 !test_bit(__I40E_DOWN, &pf->state)) {
9537 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9538 return -ENODEV;
9539 }
9540
9541 uplink_seid = vsi->uplink_seid;
9542 if (vsi->type != I40E_VSI_SRIOV) {
9543 if (vsi->netdev_registered) {
9544 vsi->netdev_registered = false;
9545 if (vsi->netdev) {
9546 /* results in a call to i40e_close() */
9547 unregister_netdev(vsi->netdev);
9548 }
9549 } else {
9550 i40e_vsi_close(vsi);
9551 }
9552 i40e_vsi_disable_irq(vsi);
9553 }
9554
9555 spin_lock_bh(&vsi->mac_filter_list_lock);
9556 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
9557 i40e_del_filter(vsi, f->macaddr, f->vlan,
9558 f->is_vf, f->is_netdev);
9559 spin_unlock_bh(&vsi->mac_filter_list_lock);
9560
9561 i40e_sync_vsi_filters(vsi);
9562
9563 i40e_vsi_delete(vsi);
9564 i40e_vsi_free_q_vectors(vsi);
9565 if (vsi->netdev) {
9566 free_netdev(vsi->netdev);
9567 vsi->netdev = NULL;
9568 }
9569 i40e_vsi_clear_rings(vsi);
9570 i40e_vsi_clear(vsi);
9571
9572 /* If this was the last thing on the VEB, except for the
9573 * controlling VSI, remove the VEB, which puts the controlling
9574 * VSI onto the next level down in the switch.
9575 *
9576 * Well, okay, there's one more exception here: don't remove
9577 * the orphan VEBs yet. We'll wait for an explicit remove request
9578 * from up the network stack.
9579 */
9580 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
9581 if (pf->vsi[i] &&
9582 pf->vsi[i]->uplink_seid == uplink_seid &&
9583 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9584 n++; /* count the VSIs */
9585 }
9586 }
9587 for (i = 0; i < I40E_MAX_VEB; i++) {
9588 if (!pf->veb[i])
9589 continue;
9590 if (pf->veb[i]->uplink_seid == uplink_seid)
9591 n++; /* count the VEBs */
9592 if (pf->veb[i]->seid == uplink_seid)
9593 veb = pf->veb[i];
9594 }
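/* Only release the VEB when nothing else is left hanging off this
 * uplink and the VEB itself is not a floating one (uplink_seid == 0).
 */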
9595 if (n == 0 && veb && veb->uplink_seid != 0)
9596 i40e_veb_release(veb);
9597
9598 return 0;
9599 }
9600
9601 /**
9602 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
9603 * @vsi: ptr to the VSI
9604 *
9605 * This should only be called after i40e_vsi_mem_alloc() which allocates the
9606 * corresponding SW VSI structure and initializes num_queue_pairs for the
9607 * newly allocated VSI.
9608 *
9609 * Returns 0 on success or negative on failure
9610 **/
9611 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
9612 {
9613 int ret = -ENOENT;
9614 struct i40e_pf *pf = vsi->back;
9615
9616 if (vsi->q_vectors[0]) {
9617 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
9618 vsi->seid);
9619 return -EEXIST;
9620 }
9621
9622 if (vsi->base_vector) {
9623 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
9624 vsi->seid, vsi->base_vector);
9625 return -EEXIST;
9626 }
9627
9628 ret = i40e_vsi_alloc_q_vectors(vsi);
9629 if (ret) {
9630 dev_info(&pf->pdev->dev,
9631 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
9632 vsi->num_q_vectors, vsi->seid, ret);
9633 vsi->num_q_vectors = 0;
9634 goto vector_setup_out;
9635 }
9636
9637 /* In Legacy mode, we do not have to get any other vector since we
9638 * piggyback on the misc/ICR0 for queue interrupts.
9639 */
9640 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
9641 return ret;
9642 if (vsi->num_q_vectors)
9643 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
9644 vsi->num_q_vectors, vsi->idx);
9645 if (vsi->base_vector < 0) {
9646 dev_info(&pf->pdev->dev,
9647 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
9648 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
9649 i40e_vsi_free_q_vectors(vsi);
9650 ret = -ENOENT;
9651 goto vector_setup_out;
9652 }
9653
9654 vector_setup_out:
9655 return ret;
9656 }
9657
9658 /**
9659 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
9660 * @vsi: pointer to the vsi.
9661 *
9662 * This re-allocates a vsi's queue resources.
9663 *
9664 * Returns pointer to the successfully allocated and configured VSI sw struct
9665 * on success, otherwise returns NULL on failure.
9666 **/
9667 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
9668 {
9669 struct i40e_pf *pf;
9670 u8 enabled_tc;
9671 int ret;
9672
9673 if (!vsi)
9674 return NULL;
9675
9676 pf = vsi->back;
9677
9678 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9679 i40e_vsi_clear_rings(vsi);
9680
9681 i40e_vsi_free_arrays(vsi, false);
9682 i40e_set_num_rings_in_vsi(vsi);
9683 ret = i40e_vsi_alloc_arrays(vsi, false);
9684 if (ret)
9685 goto err_vsi;
9686
9687 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
9688 if (ret < 0) {
9689 dev_info(&pf->pdev->dev,
9690 "failed to get tracking for %d queues for VSI %d err %d\n",
9691 vsi->alloc_queue_pairs, vsi->seid, ret);
9692 goto err_vsi;
9693 }
9694 vsi->base_queue = ret;
9695
9696 /* Update the FW view of the VSI. Force a reset of TC and queue
9697 * layout configurations.
9698 */
9699 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9700 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9701 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9702 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9703
9704 /* assign it some queues */
9705 ret = i40e_alloc_rings(vsi);
9706 if (ret)
9707 goto err_rings;
9708
9709 /* map all of the rings to the q_vectors */
9710 i40e_vsi_map_rings_to_vectors(vsi);
9711 return vsi;
9712
9713 err_rings:
9714 i40e_vsi_free_q_vectors(vsi);
9715 if (vsi->netdev_registered) {
9716 vsi->netdev_registered = false;
9717 unregister_netdev(vsi->netdev);
9718 free_netdev(vsi->netdev);
9719 vsi->netdev = NULL;
9720 }
9721 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9722 err_vsi:
9723 i40e_vsi_clear(vsi);
9724 return NULL;
9725 }
9726
9727 /**
9728 * i40e_macaddr_init - explicitly write the mac address filters.
9729 *
9730 * @vsi: pointer to the vsi.
9731 * @macaddr: the MAC address
9732 *
9733 * This is needed when the macaddr has been obtained by other
9734 * means than the default, e.g., from Open Firmware or IDPROM.
9735 * Returns 0 on success, negative on failure
9736 **/
9737 static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
9738 {
9739 int ret;
9740 struct i40e_aqc_add_macvlan_element_data element;
9741
9742 ret = i40e_aq_mac_address_write(&vsi->back->hw,
9743 I40E_AQC_WRITE_TYPE_LAA_WOL,
9744 macaddr, NULL);
9745 if (ret) {
9746 dev_info(&vsi->back->pdev->dev,
9747 "Addr change for VSI failed: %d\n", ret);
9748 return -EADDRNOTAVAIL;
9749 }
9750
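/* Writing the address alone presumably does not install a filter, so
 * also add a perfect-match MACVLAN entry for the new MAC.
 */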
9751 memset(&element, 0, sizeof(element));
9752 ether_addr_copy(element.mac_addr, macaddr);
9753 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
9754 ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
9755 if (ret) {
9756 dev_info(&vsi->back->pdev->dev,
9757 "add filter failed err %s aq_err %s\n",
9758 i40e_stat_str(&vsi->back->hw, ret),
9759 i40e_aq_str(&vsi->back->hw,
9760 vsi->back->hw.aq.asq_last_status));
9761 }
9762 return ret;
9763 }
9764
9765 /**
9766 * i40e_vsi_setup - Set up a VSI by a given type
9767 * @pf: board private structure
9768 * @type: VSI type
9769 * @uplink_seid: the switch element to link to
9770 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9771 *
9772 * This allocates the sw VSI structure and its queue resources, then adds the
9773 * VSI to the identified VEB.
9774 *
9775 * Returns pointer to the successfully allocated and configured VSI sw struct on
9776 * success, otherwise returns NULL on failure.
9777 **/
9778 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9779 u16 uplink_seid, u32 param1)
9780 {
9781 struct i40e_vsi *vsi = NULL;
9782 struct i40e_veb *veb = NULL;
9783 int ret, i;
9784 int v_idx;
9785
9786 /* The requested uplink_seid must be either
9787 * - the PF's port seid
9788 * no VEB is needed because this is the PF
9789 * or this is a Flow Director special case VSI
9790 * - seid of an existing VEB
9791 * - seid of a VSI that owns an existing VEB
9792 * - seid of a VSI that doesn't own a VEB
9793 * a new VEB is created and the VSI becomes the owner
9794 * - seid of the PF VSI, which is what creates the first VEB
9795 * this is a special case of the previous
9796 *
9797 * Find which uplink_seid we were given and create a new VEB if needed
9798 */
9799 for (i = 0; i < I40E_MAX_VEB; i++) {
9800 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9801 veb = pf->veb[i];
9802 break;
9803 }
9804 }
9805
9806 if (!veb && uplink_seid != pf->mac_seid) {
9807
9808 for (i = 0; i < pf->num_alloc_vsi; i++) {
9809 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9810 vsi = pf->vsi[i];
9811 break;
9812 }
9813 }
9814 if (!vsi) {
9815 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9816 uplink_seid);
9817 return NULL;
9818 }
9819
9820 if (vsi->uplink_seid == pf->mac_seid)
9821 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9822 vsi->tc_config.enabled_tc);
9823 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9824 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9825 vsi->tc_config.enabled_tc);
9826 if (veb) {
9827 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9828 dev_info(&vsi->back->pdev->dev,
9829 "New VSI creation error, uplink seid of LAN VSI expected.\n");
9830 return NULL;
9831 }
9832 /* We come up by default in VEPA mode if SRIOV is not
9833 * already enabled, in which case we can't force VEPA
9834 * mode.
9835 */
9836 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9837 veb->bridge_mode = BRIDGE_MODE_VEPA;
9838 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9839 }
9840 i40e_config_bridge_mode(veb);
9841 }
9842 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9843 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9844 veb = pf->veb[i];
9845 }
9846 if (!veb) {
9847 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9848 return NULL;
9849 }
9850
9851 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9852 uplink_seid = veb->seid;
9853 }
9854
9855 /* get vsi sw struct */
9856 v_idx = i40e_vsi_mem_alloc(pf, type);
9857 if (v_idx < 0)
9858 goto err_alloc;
9859 vsi = pf->vsi[v_idx];
9860 if (!vsi)
9861 goto err_alloc;
9862 vsi->type = type;
9863 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9864
9865 if (type == I40E_VSI_MAIN)
9866 pf->lan_vsi = v_idx;
9867 else if (type == I40E_VSI_SRIOV)
9868 vsi->vf_id = param1;
9869 /* assign it some queues */
9870 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9871 vsi->idx);
9872 if (ret < 0) {
9873 dev_info(&pf->pdev->dev,
9874 "failed to get tracking for %d queues for VSI %d err=%d\n",
9875 vsi->alloc_queue_pairs, vsi->seid, ret);
9876 goto err_vsi;
9877 }
9878 vsi->base_queue = ret;
9879
9880 /* get a VSI from the hardware */
9881 vsi->uplink_seid = uplink_seid;
9882 ret = i40e_add_vsi(vsi);
9883 if (ret)
9884 goto err_vsi;
9885
9886 switch (vsi->type) {
9887 /* setup the netdev if needed */
9888 case I40E_VSI_MAIN:
9889 /* Apply relevant filters if a platform-specific mac
9890 * address was selected.
9891 */
9892 if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
9893 ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
9894 if (ret) {
9895 dev_warn(&pf->pdev->dev,
9896 "could not set up macaddr; err %d\n",
9897 ret);
9898 }
9899 }
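/* fall through - the main VSI also needs the netdev set up below */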
9900 case I40E_VSI_VMDQ2:
9901 case I40E_VSI_FCOE:
9902 ret = i40e_config_netdev(vsi);
9903 if (ret)
9904 goto err_netdev;
9905 ret = register_netdev(vsi->netdev);
9906 if (ret)
9907 goto err_netdev;
9908 vsi->netdev_registered = true;
9909 netif_carrier_off(vsi->netdev);
9910 #ifdef CONFIG_I40E_DCB
9911 /* Setup DCB netlink interface */
9912 i40e_dcbnl_setup(vsi);
9913 #endif /* CONFIG_I40E_DCB */
9914 /* fall through */
9915
9916 case I40E_VSI_FDIR:
9917 /* set up vectors and rings if needed */
9918 ret = i40e_vsi_setup_vectors(vsi);
9919 if (ret)
9920 goto err_msix;
9921
9922 ret = i40e_alloc_rings(vsi);
9923 if (ret)
9924 goto err_rings;
9925
9926 /* map all of the rings to the q_vectors */
9927 i40e_vsi_map_rings_to_vectors(vsi);
9928
9929 i40e_vsi_reset_stats(vsi);
9930 break;
9931
9932 default:
9933 /* no netdev or rings for the other VSI types */
9934 break;
9935 }
9936
9937 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9938 (vsi->type == I40E_VSI_VMDQ2)) {
9939 ret = i40e_vsi_config_rss(vsi);
9940 }
9941 return vsi;
9942
9943 err_rings:
9944 i40e_vsi_free_q_vectors(vsi);
9945 err_msix:
9946 if (vsi->netdev_registered) {
9947 vsi->netdev_registered = false;
9948 unregister_netdev(vsi->netdev);
9949 free_netdev(vsi->netdev);
9950 vsi->netdev = NULL;
9951 }
9952 err_netdev:
9953 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9954 err_vsi:
9955 i40e_vsi_clear(vsi);
9956 err_alloc:
9957 return NULL;
9958 }
9959
9960 /**
9961 * i40e_veb_get_bw_info - Query VEB BW information
9962 * @veb: the veb to query
9963 *
9964 * Query the Tx scheduler BW configuration data for given VEB
9965 **/
9966 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9967 {
9968 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9969 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9970 struct i40e_pf *pf = veb->pf;
9971 struct i40e_hw *hw = &pf->hw;
9972 u32 tc_bw_max;
9973 int ret = 0;
9974 int i;
9975
9976 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9977 &bw_data, NULL);
9978 if (ret) {
9979 dev_info(&pf->pdev->dev,
9980 "query veb bw config failed, err %s aq_err %s\n",
9981 i40e_stat_str(&pf->hw, ret),
9982 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9983 goto out;
9984 }
9985
9986 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9987 &ets_data, NULL);
9988 if (ret) {
9989 dev_info(&pf->pdev->dev,
9990 "query veb bw ets config failed, err %s aq_err %s\n",
9991 i40e_stat_str(&pf->hw, ret),
9992 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9993 goto out;
9994 }
9995
9996 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9997 veb->bw_max_quanta = ets_data.tc_bw_max;
9998 veb->is_abs_credits = bw_data.absolute_credits_enable;
9999 veb->enabled_tc = ets_data.tc_valid_bits;
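/* tc_bw_max comes back as two little-endian 16-bit words; merge them
 * into one 32-bit value so the per-TC max quanta can be pulled out in
 * 4-bit steps in the loop below.
 */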
10000 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
10001 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
10002 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10003 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
10004 veb->bw_tc_limit_credits[i] =
10005 le16_to_cpu(bw_data.tc_bw_limits[i]);
10006 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
10007 }
10008
10009 out:
10010 return ret;
10011 }
10012
10013 /**
10014 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
10015 * @pf: board private structure
10016 *
10017 * On error: returns error code (negative)
10018 * On success: returns veb index in PF (positive)
10019 **/
10020 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
10021 {
10022 int ret = -ENOENT;
10023 struct i40e_veb *veb;
10024 int i;
10025
10026 /* Need to protect the allocation of switch elements at the PF level */
10027 mutex_lock(&pf->switch_mutex);
10028
10029 /* VEB list may be fragmented if VEB creation/destruction has
10030 * been happening. We can afford to do a quick scan to look
10031 * for any free slots in the list.
10032 *
10033 * find the next empty veb slot
10034 */
10035 i = 0;
10036 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
10037 i++;
10038 if (i >= I40E_MAX_VEB) {
10039 ret = -ENOMEM;
10040 goto err_alloc_veb; /* out of VEB slots! */
10041 }
10042
10043 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
10044 if (!veb) {
10045 ret = -ENOMEM;
10046 goto err_alloc_veb;
10047 }
10048 veb->pf = pf;
10049 veb->idx = i;
10050 veb->enabled_tc = 1;
10051
10052 pf->veb[i] = veb;
10053 ret = i;
10054 err_alloc_veb:
10055 mutex_unlock(&pf->switch_mutex);
10056 return ret;
10057 }
10058
10059 /**
10060 * i40e_switch_branch_release - Delete a branch of the switch tree
10061 * @branch: where to start deleting
10062 *
10063 * This uses recursion to find the tips of the branch to be
10064 * removed, deleting until we get back to and can delete this VEB.
10065 **/
10066 static void i40e_switch_branch_release(struct i40e_veb *branch)
10067 {
10068 struct i40e_pf *pf = branch->pf;
10069 u16 branch_seid = branch->seid;
10070 u16 veb_idx = branch->idx;
10071 int i;
10072
10073 /* release any VEBs on this VEB - RECURSION */
10074 for (i = 0; i < I40E_MAX_VEB; i++) {
10075 if (!pf->veb[i])
10076 continue;
10077 if (pf->veb[i]->uplink_seid == branch->seid)
10078 i40e_switch_branch_release(pf->veb[i]);
10079 }
10080
10081 /* Release the VSIs on this VEB, but not the owner VSI.
10082 *
10083 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
10084 * the VEB itself, so don't use (*branch) after this loop.
10085 */
10086 for (i = 0; i < pf->num_alloc_vsi; i++) {
10087 if (!pf->vsi[i])
10088 continue;
10089 if (pf->vsi[i]->uplink_seid == branch_seid &&
10090 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
10091 i40e_vsi_release(pf->vsi[i]);
10092 }
10093 }
10094
10095 /* There's one corner case where the VEB might not have been
10096 * removed, so double check it here and remove it if needed.
10097 * This case happens if the veb was created from the debugfs
10098 * commands and no VSIs were added to it.
10099 */
10100 if (pf->veb[veb_idx])
10101 i40e_veb_release(pf->veb[veb_idx]);
10102 }
10103
10104 /**
10105 * i40e_veb_clear - remove veb struct
10106 * @veb: the veb to remove
10107 **/
10108 static void i40e_veb_clear(struct i40e_veb *veb)
10109 {
10110 if (!veb)
10111 return;
10112
10113 if (veb->pf) {
10114 struct i40e_pf *pf = veb->pf;
10115
10116 mutex_lock(&pf->switch_mutex);
10117 if (pf->veb[veb->idx] == veb)
10118 pf->veb[veb->idx] = NULL;
10119 mutex_unlock(&pf->switch_mutex);
10120 }
10121
10122 kfree(veb);
10123 }
10124
10125 /**
10126 * i40e_veb_release - Delete a VEB and free its resources
10127 * @veb: the VEB being removed
10128 **/
10129 void i40e_veb_release(struct i40e_veb *veb)
10130 {
10131 struct i40e_vsi *vsi = NULL;
10132 struct i40e_pf *pf;
10133 int i, n = 0;
10134
10135 pf = veb->pf;
10136
10137 /* find the remaining VSI and check for extras */
10138 for (i = 0; i < pf->num_alloc_vsi; i++) {
10139 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
10140 n++;
10141 vsi = pf->vsi[i];
10142 }
10143 }
10144 if (n != 1) {
10145 dev_info(&pf->pdev->dev,
10146 "can't remove VEB %d with %d VSIs left\n",
10147 veb->seid, n);
10148 return;
10149 }
10150
10151 /* move the remaining VSI to uplink veb */
10152 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
10153 if (veb->uplink_seid) {
10154 vsi->uplink_seid = veb->uplink_seid;
10155 if (veb->uplink_seid == pf->mac_seid)
10156 vsi->veb_idx = I40E_NO_VEB;
10157 else
10158 vsi->veb_idx = veb->veb_idx;
10159 } else {
10160 /* floating VEB */
10161 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10162 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
10163 }
10164
10165 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10166 i40e_veb_clear(veb);
10167 }
10168
10169 /**
10170 * i40e_add_veb - create the VEB in the switch
10171 * @veb: the VEB to be instantiated
10172 * @vsi: the controlling VSI
10173 **/
10174 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10175 {
10176 struct i40e_pf *pf = veb->pf;
10177 bool is_default = veb->pf->cur_promisc;
10178 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10179 int ret;
10180
10181 /* get a VEB from the hardware */
10182 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10183 veb->enabled_tc, is_default,
10184 &veb->seid, enable_stats, NULL);
10185 if (ret) {
10186 dev_info(&pf->pdev->dev,
10187 "couldn't add VEB, err %s aq_err %s\n",
10188 i40e_stat_str(&pf->hw, ret),
10189 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10190 return -EPERM;
10191 }
10192
10193 /* get statistics counter */
10194 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
10195 &veb->stats_idx, NULL, NULL, NULL);
10196 if (ret) {
10197 dev_info(&pf->pdev->dev,
10198 "couldn't get VEB statistics idx, err %s aq_err %s\n",
10199 i40e_stat_str(&pf->hw, ret),
10200 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10201 return -EPERM;
10202 }
10203 ret = i40e_veb_get_bw_info(veb);
10204 if (ret) {
10205 dev_info(&pf->pdev->dev,
10206 "couldn't get VEB bw info, err %s aq_err %s\n",
10207 i40e_stat_str(&pf->hw, ret),
10208 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10209 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
10210 return -ENOENT;
10211 }
10212
10213 vsi->uplink_seid = veb->seid;
10214 vsi->veb_idx = veb->idx;
10215 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
10216
10217 return 0;
10218 }
10219
10220 /**
10221 * i40e_veb_setup - Set up a VEB
10222 * @pf: board private structure
10223 * @flags: VEB setup flags
10224 * @uplink_seid: the switch element to link to
10225 * @vsi_seid: the initial VSI seid
10226 * @enabled_tc: Enabled TC bit-map
10227 *
10228 * This allocates the sw VEB structure and links it into the switch.
10229 * It is possible and legal for this to be a duplicate of an already
10230 * existing VEB. It is also possible for both uplink and vsi seids
10231 * to be zero, in order to create a floating VEB.
10232 *
10233 * Returns pointer to the successfully allocated VEB sw struct on
10234 * success, otherwise returns NULL on failure.
10235 **/
10236 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
10237 u16 uplink_seid, u16 vsi_seid,
10238 u8 enabled_tc)
10239 {
10240 struct i40e_veb *veb, *uplink_veb = NULL;
10241 int vsi_idx, veb_idx;
10242 int ret;
10243
10244 /* if one seid is 0, the other must be 0 to create a floating relay */
10245 if ((uplink_seid == 0 || vsi_seid == 0) &&
10246 (uplink_seid + vsi_seid != 0)) {
10247 dev_info(&pf->pdev->dev,
10248 "one, not both seid's are 0: uplink=%d vsi=%d\n",
10249 uplink_seid, vsi_seid);
10250 return NULL;
10251 }
10252
10253 /* make sure there is such a vsi and uplink */
10254 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
10255 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
10256 break;
10257 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
10258 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
10259 vsi_seid);
10260 return NULL;
10261 }
10262
10263 if (uplink_seid && uplink_seid != pf->mac_seid) {
10264 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10265 if (pf->veb[veb_idx] &&
10266 pf->veb[veb_idx]->seid == uplink_seid) {
10267 uplink_veb = pf->veb[veb_idx];
10268 break;
10269 }
10270 }
10271 if (!uplink_veb) {
10272 dev_info(&pf->pdev->dev,
10273 "uplink seid %d not found\n", uplink_seid);
10274 return NULL;
10275 }
10276 }
10277
10278 /* get veb sw struct */
10279 veb_idx = i40e_veb_mem_alloc(pf);
10280 if (veb_idx < 0)
10281 goto err_alloc;
10282 veb = pf->veb[veb_idx];
10283 veb->flags = flags;
10284 veb->uplink_seid = uplink_seid;
10285 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
10286 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
10287
10288 /* create the VEB in the switch */
10289 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
10290 if (ret)
10291 goto err_veb;
10292 if (vsi_idx == pf->lan_vsi)
10293 pf->lan_veb = veb->idx;
10294
10295 return veb;
10296
10297 err_veb:
10298 i40e_veb_clear(veb);
10299 err_alloc:
10300 return NULL;
10301 }
10302
10303 /**
10304 * i40e_setup_pf_switch_element - set PF vars based on switch type
10305 * @pf: board private structure
10306 * @ele: element we are building info from
10307 * @num_reported: total number of elements
10308 * @printconfig: should we print the contents
10309 *
10310 * helper function to assist in extracting a few useful SEID values.
10311 **/
10312 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
10313 struct i40e_aqc_switch_config_element_resp *ele,
10314 u16 num_reported, bool printconfig)
10315 {
10316 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
10317 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
10318 u8 element_type = ele->element_type;
10319 u16 seid = le16_to_cpu(ele->seid);
10320
10321 if (printconfig)
10322 dev_info(&pf->pdev->dev,
10323 "type=%d seid=%d uplink=%d downlink=%d\n",
10324 element_type, seid, uplink_seid, downlink_seid);
10325
10326 switch (element_type) {
10327 case I40E_SWITCH_ELEMENT_TYPE_MAC:
10328 pf->mac_seid = seid;
10329 break;
10330 case I40E_SWITCH_ELEMENT_TYPE_VEB:
10331 /* Main VEB? */
10332 if (uplink_seid != pf->mac_seid)
10333 break;
10334 if (pf->lan_veb == I40E_NO_VEB) {
10335 int v;
10336
10337 /* find existing or else empty VEB */
10338 for (v = 0; v < I40E_MAX_VEB; v++) {
10339 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
10340 pf->lan_veb = v;
10341 break;
10342 }
10343 }
10344 if (pf->lan_veb == I40E_NO_VEB) {
10345 v = i40e_veb_mem_alloc(pf);
10346 if (v < 0)
10347 break;
10348 pf->lan_veb = v;
10349 }
10350 }
10351
10352 pf->veb[pf->lan_veb]->seid = seid;
10353 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
10354 pf->veb[pf->lan_veb]->pf = pf;
10355 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
10356 break;
10357 case I40E_SWITCH_ELEMENT_TYPE_VSI:
10358 if (num_reported != 1)
10359 break;
10360 /* This is immediately after a reset so we can assume this is
10361 * the PF's VSI
10362 */
10363 pf->mac_seid = uplink_seid;
10364 pf->pf_seid = downlink_seid;
10365 pf->main_vsi_seid = seid;
10366 if (printconfig)
10367 dev_info(&pf->pdev->dev,
10368 "pf_seid=%d main_vsi_seid=%d\n",
10369 pf->pf_seid, pf->main_vsi_seid);
10370 break;
10371 case I40E_SWITCH_ELEMENT_TYPE_PF:
10372 case I40E_SWITCH_ELEMENT_TYPE_VF:
10373 case I40E_SWITCH_ELEMENT_TYPE_EMP:
10374 case I40E_SWITCH_ELEMENT_TYPE_BMC:
10375 case I40E_SWITCH_ELEMENT_TYPE_PE:
10376 case I40E_SWITCH_ELEMENT_TYPE_PA:
10377 /* ignore these for now */
10378 break;
10379 default:
10380 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
10381 element_type, seid);
10382 break;
10383 }
10384 }
10385
10386 /**
10387 * i40e_fetch_switch_configuration - Get switch config from firmware
10388 * @pf: board private structure
10389 * @printconfig: should we print the contents
10390 *
10391 * Get the current switch configuration from the device and
10392 * extract a few useful SEID values.
10393 **/
10394 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
10395 {
10396 struct i40e_aqc_get_switch_config_resp *sw_config;
10397 u16 next_seid = 0;
10398 int ret = 0;
10399 u8 *aq_buf;
10400 int i;
10401
10402 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
10403 if (!aq_buf)
10404 return -ENOMEM;
10405
10406 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
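/* The switch configuration may not fit in a single AQ buffer; the
 * firmware uses next_seid as a cursor, so keep fetching chunks until
 * it comes back as 0.
 */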
10407 do {
10408 u16 num_reported, num_total;
10409
10410 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
10411 I40E_AQ_LARGE_BUF,
10412 &next_seid, NULL);
10413 if (ret) {
10414 dev_info(&pf->pdev->dev,
10415 "get switch config failed err %s aq_err %s\n",
10416 i40e_stat_str(&pf->hw, ret),
10417 i40e_aq_str(&pf->hw,
10418 pf->hw.aq.asq_last_status));
10419 kfree(aq_buf);
10420 return -ENOENT;
10421 }
10422
10423 num_reported = le16_to_cpu(sw_config->header.num_reported);
10424 num_total = le16_to_cpu(sw_config->header.num_total);
10425
10426 if (printconfig)
10427 dev_info(&pf->pdev->dev,
10428 "header: %d reported %d total\n",
10429 num_reported, num_total);
10430
10431 for (i = 0; i < num_reported; i++) {
10432 struct i40e_aqc_switch_config_element_resp *ele =
10433 &sw_config->element[i];
10434
10435 i40e_setup_pf_switch_element(pf, ele, num_reported,
10436 printconfig);
10437 }
10438 } while (next_seid != 0);
10439
10440 kfree(aq_buf);
10441 return ret;
10442 }
10443
10444 /**
10445 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
10446 * @pf: board private structure
10447 * @reinit: true if the Main VSI needs to be re-initialized
10448 *
10449 * Returns 0 on success, negative value on failure
10450 **/
10451 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
10452 {
10453 int ret;
10454
10455 /* find out what's out there already */
10456 ret = i40e_fetch_switch_configuration(pf, false);
10457 if (ret) {
10458 dev_info(&pf->pdev->dev,
10459 "couldn't fetch switch config, err %s aq_err %s\n",
10460 i40e_stat_str(&pf->hw, ret),
10461 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10462 return ret;
10463 }
10464 i40e_pf_reset_stats(pf);
10465
10466 /* first time setup */
10467 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
10468 struct i40e_vsi *vsi = NULL;
10469 u16 uplink_seid;
10470
10471 /* Set up the PF VSI associated with the PF's main VSI
10472 * that is already in the HW switch
10473 */
10474 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
10475 uplink_seid = pf->veb[pf->lan_veb]->seid;
10476 else
10477 uplink_seid = pf->mac_seid;
10478 if (pf->lan_vsi == I40E_NO_VSI)
10479 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
10480 else if (reinit)
10481 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
10482 if (!vsi) {
10483 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
10484 i40e_fdir_teardown(pf);
10485 return -EAGAIN;
10486 }
10487 } else {
10488 /* force a reset of TC and queue layout configurations */
10489 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
10490
10491 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
10492 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
10493 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
10494 }
10495 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
10496
10497 i40e_fdir_sb_setup(pf);
10498
10499 /* Setup static PF queue filter control settings */
10500 ret = i40e_setup_pf_filter_control(pf);
10501 if (ret) {
10502 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
10503 ret);
10504 /* Failure here should not prevent continuing with other steps */
10505 }
10506
10507 /* enable RSS in the HW, even for only one queue, as the stack can use
10508 * the hash
10509 */
10510 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
10511 i40e_pf_config_rss(pf);
10512
10513 /* fill in link information and enable LSE reporting */
10514 i40e_update_link_info(&pf->hw);
10515 i40e_link_event(pf);
10516
10517 /* Initialize user-specific link properties */
10518 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
10519 I40E_AQ_AN_COMPLETED) ? true : false);
10520
10521 i40e_ptp_init(pf);
10522
10523 return ret;
10524 }
10525
10526 /**
10527 * i40e_determine_queue_usage - Work out queue distribution
10528 * @pf: board private structure
10529 **/
10530 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10531 {
10532 int queues_left;
10533
10534 pf->num_lan_qps = 0;
10535 #ifdef I40E_FCOE
10536 pf->num_fcoe_qps = 0;
10537 #endif
10538
10539 /* Find the max queues to be put into basic use. We'll always be
10540 * using TC0, whether or not DCB is running, and TC0 will get the
10541 * big RSS set.
10542 */
10543 queues_left = pf->hw.func_caps.num_tx_qp;
10544
10545 if ((queues_left == 1) ||
10546 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10547 /* one qp for PF, no queues for anything else */
10548 queues_left = 0;
10549 pf->alloc_rss_size = pf->num_lan_qps = 1;
10550
10551 /* make sure all the fancies are disabled */
10552 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10553 I40E_FLAG_IWARP_ENABLED |
10554 #ifdef I40E_FCOE
10555 I40E_FLAG_FCOE_ENABLED |
10556 #endif
10557 I40E_FLAG_FD_SB_ENABLED |
10558 I40E_FLAG_FD_ATR_ENABLED |
10559 I40E_FLAG_DCB_CAPABLE |
10560 I40E_FLAG_SRIOV_ENABLED |
10561 I40E_FLAG_VMDQ_ENABLED);
10562 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10563 I40E_FLAG_FD_SB_ENABLED |
10564 I40E_FLAG_FD_ATR_ENABLED |
10565 I40E_FLAG_DCB_CAPABLE))) {
10566 /* one qp for PF */
10567 pf->alloc_rss_size = pf->num_lan_qps = 1;
10568 queues_left -= pf->num_lan_qps;
10569
10570 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
10571 I40E_FLAG_IWARP_ENABLED |
10572 #ifdef I40E_FCOE
10573 I40E_FLAG_FCOE_ENABLED |
10574 #endif
10575 I40E_FLAG_FD_SB_ENABLED |
10576 I40E_FLAG_FD_ATR_ENABLED |
10577 I40E_FLAG_DCB_ENABLED |
10578 I40E_FLAG_VMDQ_ENABLED);
10579 } else {
10580 /* Not enough queues for all TCs */
10581 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10582 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10583 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10584 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10585 }
10586 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10587 num_online_cpus());
10588 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10589 pf->hw.func_caps.num_tx_qp);
10590
10591 queues_left -= pf->num_lan_qps;
10592 }
10593
10594 #ifdef I40E_FCOE
10595 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10596 if (I40E_DEFAULT_FCOE <= queues_left) {
10597 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10598 } else if (I40E_MINIMUM_FCOE <= queues_left) {
10599 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10600 } else {
10601 pf->num_fcoe_qps = 0;
10602 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10603 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10604 }
10605
10606 queues_left -= pf->num_fcoe_qps;
10607 }
10608
10609 #endif
10610 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10611 if (queues_left > 1) {
10612 queues_left -= 1; /* save 1 queue for FD */
10613 } else {
10614 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10615 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10616 }
10617 }
10618
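/* Whatever is still left is handed first to the requested VFs and
 * then to VMDQ VSIs, each capped by how many full queue groups fit.
 */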
10619 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10620 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10621 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10622 (queues_left / pf->num_vf_qps));
10623 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10624 }
10625
10626 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10627 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10628 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10629 (queues_left / pf->num_vmdq_qps));
10630 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10631 }
10632
10633 pf->queues_left = queues_left;
10634 dev_dbg(&pf->pdev->dev,
10635 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10636 pf->hw.func_caps.num_tx_qp,
10637 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10638 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
10639 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
10640 queues_left);
10641 #ifdef I40E_FCOE
10642 dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10643 #endif
10644 }
10645
10646 /**
10647 * i40e_setup_pf_filter_control - Setup PF static filter control
10648 * @pf: PF to be setup
10649 *
10650 * i40e_setup_pf_filter_control sets up a PF's initial filter control
10651 * settings. If PE/FCoE are enabled then it will also set the per PF
10652 * based filter sizes required for them. It also enables Flow Director,
10653 * ethertype and macvlan type filter settings for the PF.
10654 *
10655 * Returns 0 on success, negative on failure
10656 **/
10657 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10658 {
10659 struct i40e_filter_control_settings *settings = &pf->filter_settings;
10660
10661 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10662
10663 /* Flow Director is enabled */
10664 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10665 settings->enable_fdir = true;
10666
10667 /* Ethtype and MACVLAN filters enabled for PF */
10668 settings->enable_ethtype = true;
10669 settings->enable_macvlan = true;
10670
10671 if (i40e_set_filter_control(&pf->hw, settings))
10672 return -ENOENT;
10673
10674 return 0;
10675 }
10676
10677 #define INFO_STRING_LEN 255
10678 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
10679 static void i40e_print_features(struct i40e_pf *pf)
10680 {
10681 struct i40e_hw *hw = &pf->hw;
10682 char *buf;
10683 int i;
10684
10685 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
10686 if (!buf)
10687 return;
10688
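/* Build the feature list in one buffer so it is emitted as a single
 * log line at the end.
 */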
10689 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
10690 #ifdef CONFIG_PCI_IOV
10691 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
10692 #endif
10693 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
10694 pf->hw.func_caps.num_vsis,
10695 pf->vsi[pf->lan_vsi]->num_queue_pairs,
10696 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
10697
10698 if (pf->flags & I40E_FLAG_RSS_ENABLED)
10699 i += snprintf(&buf[i], REMAIN(i), " RSS");
10700 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10701 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
10702 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10703 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
10704 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
10705 }
10706 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10707 i += snprintf(&buf[i], REMAIN(i), " DCB");
10708 #if IS_ENABLED(CONFIG_VXLAN)
10709 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
10710 #endif
10711 #if IS_ENABLED(CONFIG_GENEVE)
10712 i += snprintf(&buf[i], REMAIN(i), " Geneve");
10713 #endif
10714 if (pf->flags & I40E_FLAG_PTP)
10715 i += snprintf(&buf[i], REMAIN(i), " PTP");
10716 #ifdef I40E_FCOE
10717 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10718 i += snprintf(&buf[i], REMAIN(i), " FCOE");
10719 #endif
10720 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10721 i += snprintf(&buf[i], REMAIN(i), " VEB");
10722 else
10723 i += snprintf(&buf[i], REMAIN(i), " VEPA");
10724
10725 dev_info(&pf->pdev->dev, "%s\n", buf);
10726 kfree(buf);
10727 WARN_ON(i > INFO_STRING_LEN);
10728 }
10729
10730 /**
10731 * i40e_get_platform_mac_addr - get platform-specific MAC address
10732 *
10733 * @pdev: PCI device information struct
10734 * @pf: board private structure
10735 *
10736 * Look up the MAC address in Open Firmware on systems that support it,
10737 * and use IDPROM on SPARC if no OF address is found. On return, the
10738 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
10739 * has been selected.
10740 **/
10741 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
10742 {
10743 pf->flags &= ~I40E_FLAG_PF_MAC;
10744 if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
10745 pf->flags |= I40E_FLAG_PF_MAC;
10746 }
10747
10748 /**
10749 * i40e_probe - Device initialization routine
10750 * @pdev: PCI device information struct
10751 * @ent: entry in i40e_pci_tbl
10752 *
10753 * i40e_probe initializes a PF identified by a pci_dev structure.
10754 * The OS initialization, configuring of the PF private structure,
10755 * and a hardware reset occur.
10756 *
10757 * Returns 0 on success, negative on failure
10758 **/
10759 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10760 {
10761 struct i40e_aq_get_phy_abilities_resp abilities;
10762 struct i40e_pf *pf;
10763 struct i40e_hw *hw;
10764 static u16 pfs_found;
10765 u16 wol_nvm_bits;
10766 u16 link_status;
10767 int err;
10768 u32 val;
10769 u32 i;
10770 u8 set_fc_aq_fail;
10771
10772 err = pci_enable_device_mem(pdev);
10773 if (err)
10774 return err;
10775
10776 /* set up for high or low dma */
10777 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10778 if (err) {
10779 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10780 if (err) {
10781 dev_err(&pdev->dev,
10782 "DMA configuration failed: 0x%x\n", err);
10783 goto err_dma;
10784 }
10785 }
10786
10787 /* set up pci connections */
10788 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
10789 IORESOURCE_MEM), i40e_driver_name);
10790 if (err) {
10791 dev_info(&pdev->dev,
10792 "pci_request_selected_regions failed %d\n", err);
10793 goto err_pci_reg;
10794 }
10795
10796 pci_enable_pcie_error_reporting(pdev);
10797 pci_set_master(pdev);
10798
10799 /* Now that we have a PCI connection, we need to do the
10800 * low level device setup. This is primarily setting up
10801 * the Admin Queue structures and then querying for the
10802 * device's current profile information.
10803 */
10804 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
10805 if (!pf) {
10806 err = -ENOMEM;
10807 goto err_pf_alloc;
10808 }
10809 pf->next_vsi = 0;
10810 pf->pdev = pdev;
10811 set_bit(__I40E_DOWN, &pf->state);
10812
10813 hw = &pf->hw;
10814 hw->back = pf;
10815
10816 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10817 I40E_MAX_CSR_SPACE);
10818
10819 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10820 if (!hw->hw_addr) {
10821 err = -EIO;
10822 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10823 (unsigned int)pci_resource_start(pdev, 0),
10824 pf->ioremap_len, err);
10825 goto err_ioremap;
10826 }
10827 hw->vendor_id = pdev->vendor;
10828 hw->device_id = pdev->device;
10829 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10830 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10831 hw->subsystem_device_id = pdev->subsystem_device;
10832 hw->bus.device = PCI_SLOT(pdev->devfn);
10833 hw->bus.func = PCI_FUNC(pdev->devfn);
10834 pf->instance = pfs_found;
10835
10836 /* set up the locks for the AQ, do this only once in probe
10837 * and destroy them only once in remove
10838 */
10839 mutex_init(&hw->aq.asq_mutex);
10840 mutex_init(&hw->aq.arq_mutex);
10841
10842 if (debug != -1) {
10843 pf->msg_enable = pf->hw.debug_mask;
10844 pf->msg_enable = debug;
10845 }
10846
10847 /* do a special CORER for clearing PXE mode once at init */
10848 if (hw->revision_id == 0 &&
10849 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10850 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10851 i40e_flush(hw);
10852 msleep(200);
10853 pf->corer_count++;
10854
10855 i40e_clear_pxe_mode(hw);
10856 }
10857
10858 /* Reset here to make sure all is clean and to define PF 'n' */
10859 i40e_clear_hw(hw);
10860 err = i40e_pf_reset(hw);
10861 if (err) {
10862 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10863 goto err_pf_reset;
10864 }
10865 pf->pfr_count++;
10866
10867 hw->aq.num_arq_entries = I40E_AQ_LEN;
10868 hw->aq.num_asq_entries = I40E_AQ_LEN;
10869 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10870 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10871 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10872
10873 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10874 "%s-%s:misc",
10875 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10876
10877 err = i40e_init_shared_code(hw);
10878 if (err) {
10879 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10880 err);
10881 goto err_pf_reset;
10882 }
10883
10884 /* set up a default setting for link flow control */
10885 pf->hw.fc.requested_mode = I40E_FC_NONE;
10886
10887 err = i40e_init_adminq(hw);
10888 if (err) {
10889 if (err == I40E_ERR_FIRMWARE_API_VERSION)
10890 dev_info(&pdev->dev,
10891 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10892 else
10893 dev_info(&pdev->dev,
10894 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
10895
10896 goto err_pf_reset;
10897 }
10898
10899 /* provide nvm, fw, api versions */
10900 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
10901 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
10902 hw->aq.api_maj_ver, hw->aq.api_min_ver,
10903 i40e_nvm_version_str(hw));
10904
10905 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10906 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10907 dev_info(&pdev->dev,
10908 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10909 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10910 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10911 dev_info(&pdev->dev,
10912 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10913
10914 i40e_verify_eeprom(pf);
10915
10916 /* Rev 0 hardware was never productized */
10917 if (hw->revision_id < 1)
10918 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10919
10920 i40e_clear_pxe_mode(hw);
10921 err = i40e_get_capabilities(pf);
10922 if (err)
10923 goto err_adminq_setup;
10924
10925 err = i40e_sw_init(pf);
10926 if (err) {
10927 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10928 goto err_sw_init;
10929 }
10930
10931 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10932 hw->func_caps.num_rx_qp,
10933 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10934 if (err) {
10935 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10936 goto err_init_lan_hmc;
10937 }
10938
10939 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10940 if (err) {
10941 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10942 err = -ENOENT;
10943 goto err_configure_lan_hmc;
10944 }
10945
10946 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10947 * Ignore error return codes because if it was already disabled via
10948 * hardware settings this will fail
10949 */
10950 if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
10951 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10952 i40e_aq_stop_lldp(hw, true, NULL);
10953 }
10954
10955 i40e_get_mac_addr(hw, hw->mac.addr);
10956 /* allow a platform config to override the HW addr */
10957 i40e_get_platform_mac_addr(pdev, pf);
10958 if (!is_valid_ether_addr(hw->mac.addr)) {
10959 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10960 err = -EIO;
10961 goto err_mac_addr;
10962 }
10963 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10964 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10965 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10966 if (is_valid_ether_addr(hw->mac.port_addr))
10967 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10968 #ifdef I40E_FCOE
10969 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10970 if (err)
10971 dev_info(&pdev->dev,
10972 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10973 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10974 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10975 hw->mac.san_addr);
10976 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10977 }
10978 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10979 #endif /* I40E_FCOE */
10980
10981 pci_set_drvdata(pdev, pf);
10982 pci_save_state(pdev);
10983 #ifdef CONFIG_I40E_DCB
10984 err = i40e_init_pf_dcb(pf);
10985 if (err) {
10986 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10987 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10988 /* Continue without DCB enabled */
10989 }
10990 #endif /* CONFIG_I40E_DCB */
10991
10992 /* set up periodic task facility */
10993 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10994 pf->service_timer_period = HZ;
10995
10996 INIT_WORK(&pf->service_task, i40e_service_task);
10997 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10998 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10999
11000 /* NVM bit on means WoL disabled for the port */
11001 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
11002 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
11003 pf->wol_en = false;
11004 else
11005 pf->wol_en = true;
11006 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
11007
11008 /* set up the main switch operations */
11009 i40e_determine_queue_usage(pf);
11010 err = i40e_init_interrupt_scheme(pf);
11011 if (err)
11012 goto err_switch_setup;
11013
11014 /* The number of VSIs reported by the FW is the minimum guaranteed
11015 * to us; HW supports far more and we share the remaining pool with
11016 * the other PFs. We allocate space for more than the guarantee with
11017 * the understanding that we might not get them all later.
11018 */
11019 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
11020 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
11021 else
11022 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
11023
11024 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
11025 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
11026 GFP_KERNEL);
11027 if (!pf->vsi) {
11028 err = -ENOMEM;
11029 goto err_switch_setup;
11030 }
11031
11032 #ifdef CONFIG_PCI_IOV
11033 /* prep for VF support */
11034 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11035 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11036 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11037 if (pci_num_vf(pdev))
11038 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11039 }
11040 #endif
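/* build the default switch configuration, including the main LAN VSI */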
11041 err = i40e_setup_pf_switch(pf, false);
11042 if (err) {
11043 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
11044 goto err_vsis;
11045 }
11046
11047 /* Make sure flow control is set according to current settings */
11048 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
11049 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
11050 dev_dbg(&pf->pdev->dev,
11051 "Set fc with err %s aq_err %s on get_phy_cap\n",
11052 i40e_stat_str(hw, err),
11053 i40e_aq_str(hw, hw->aq.asq_last_status));
11054 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
11055 dev_dbg(&pf->pdev->dev,
11056 "Set fc with err %s aq_err %s on set_phy_config\n",
11057 i40e_stat_str(hw, err),
11058 i40e_aq_str(hw, hw->aq.asq_last_status));
11059 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
11060 dev_dbg(&pf->pdev->dev,
11061 "Set fc with err %s aq_err %s on get_link_info\n",
11062 i40e_stat_str(hw, err),
11063 i40e_aq_str(hw, hw->aq.asq_last_status));
11064
11065 /* if FDIR VSI was set up, start it now */
11066 for (i = 0; i < pf->num_alloc_vsi; i++) {
11067 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
11068 i40e_vsi_open(pf->vsi[i]);
11069 break;
11070 }
11071 }
11072
11073 /* The driver only wants link up/down and module qualification
11074 * reports from firmware. Note the negative logic.
11075 */
11076 err = i40e_aq_set_phy_int_mask(&pf->hw,
11077 ~(I40E_AQ_EVENT_LINK_UPDOWN |
11078 I40E_AQ_EVENT_MEDIA_NA |
11079 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
11080 if (err)
11081 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
11082 i40e_stat_str(&pf->hw, err),
11083 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11084
11085 /* Reconfigure hardware for allowing smaller MSS in the case
11086 * of TSO, so that we avoid the MDD being fired and causing
11087 * a reset in the case of small MSS+TSO.
11088 */
11089 val = rd32(hw, I40E_REG_MSS);
11090 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
11091 val &= ~I40E_REG_MSS_MIN_MASK;
11092 val |= I40E_64BYTE_MSS;
11093 wr32(hw, I40E_REG_MSS, val);
11094 }
11095
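/* when asked to by the setup code, nudge the link by restarting auto-negotiation */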
11096 if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
11097 msleep(75);
11098 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
11099 if (err)
11100 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
11101 i40e_stat_str(&pf->hw, err),
11102 i40e_aq_str(&pf->hw,
11103 pf->hw.aq.asq_last_status));
11104 }
11105 /* The main driver is (mostly) up and happy. We need to set this state
11106 * before setting up the misc vector or we get a race and the vector
11107 * ends up disabled forever.
11108 */
11109 clear_bit(__I40E_DOWN, &pf->state);
11110
11111 /* In case of MSIX we are going to setup the misc vector right here
11112 * to handle admin queue events etc. In case of legacy and MSI
11113 * the misc functionality and queue processing is combined in
11114 * the same vector and that gets setup at open.
11115 */
11116 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
11117 err = i40e_setup_misc_vector(pf);
11118 if (err) {
11119 dev_info(&pdev->dev,
11120 "setup of misc vector failed: %d\n", err);
11121 goto err_vsis;
11122 }
11123 }
11124
11125 #ifdef CONFIG_PCI_IOV
11126 /* prep for VF support */
11127 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
11128 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
11129 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
11130 /* disable link interrupts for VFs */
11131 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
11132 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
11133 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
11134 i40e_flush(hw);
11135
11136 if (pci_num_vf(pdev)) {
11137 dev_info(&pdev->dev,
11138 "Active VFs found, allocating resources.\n");
11139 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
11140 if (err)
11141 dev_info(&pdev->dev,
11142 "Error %d allocating resources for existing VFs\n",
11143 err);
11144 }
11145 }
11146 #endif /* CONFIG_PCI_IOV */
11147
11148 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
11149 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
11150 pf->num_iwarp_msix,
11151 I40E_IWARP_IRQ_PILE_ID);
11152 if (pf->iwarp_base_vector < 0) {
11153 dev_info(&pdev->dev,
11154 "failed to get tracking for %d vectors for IWARP err=%d\n",
11155 pf->num_iwarp_msix, pf->iwarp_base_vector);
11156 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
11157 }
11158 }
11159
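/* publish this PF's debugfs entries */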
11160 i40e_dbg_pf_init(pf);
11161
11162 /* tell the firmware that we're starting */
11163 i40e_send_version(pf);
11164
11165 /* since everything's happy, start the service_task timer */
11166 mod_timer(&pf->service_timer,
11167 round_jiffies(jiffies + pf->service_timer_period));
11168
11169 /* add this PF to client device list and launch a client service task */
11170 err = i40e_lan_add_device(pf);
11171 if (err)
11172 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
11173 err);
11174
11175 #ifdef I40E_FCOE
11176 /* create FCoE interface */
11177 i40e_fcoe_vsi_setup(pf);
11178
11179 #endif
11180 #define PCI_SPEED_SIZE 8
11181 #define PCI_WIDTH_SIZE 8
11182 /* Devices on the IOSF bus do not have this information
11183 * and will report PCI Gen 1 x 1 by default so don't bother
11184 * checking them.
11185 */
11186 if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
11187 char speed[PCI_SPEED_SIZE] = "Unknown";
11188 char width[PCI_WIDTH_SIZE] = "Unknown";
11189
11190 /* Get the negotiated link width and speed from PCI config
11191 * space
11192 */
11193 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
11194 &link_status);
11195
11196 i40e_set_pci_config_data(hw, link_status);
11197
11198 switch (hw->bus.speed) {
11199 case i40e_bus_speed_8000:
11200 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
11201 case i40e_bus_speed_5000:
11202 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
11203 case i40e_bus_speed_2500:
11204 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
11205 default:
11206 break;
11207 }
11208 switch (hw->bus.width) {
11209 case i40e_bus_width_pcie_x8:
11210 strncpy(width, "8", PCI_WIDTH_SIZE); break;
11211 case i40e_bus_width_pcie_x4:
11212 strncpy(width, "4", PCI_WIDTH_SIZE); break;
11213 case i40e_bus_width_pcie_x2:
11214 strncpy(width, "2", PCI_WIDTH_SIZE); break;
11215 case i40e_bus_width_pcie_x1:
11216 strncpy(width, "1", PCI_WIDTH_SIZE); break;
11217 default:
11218 break;
11219 }
11220
11221 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
11222 speed, width);
11223
11224 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
11225 hw->bus.speed < i40e_bus_speed_8000) {
11226 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
11227 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
11228 }
11229 }
11230
11231 /* get the requested speeds from the fw */
11232 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
11233 if (err)
11234 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
11235 i40e_stat_str(&pf->hw, err),
11236 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11237 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
11238
11239 /* get the supported phy types from the fw */
11240 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
11241 if (err)
11242 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
11243 i40e_stat_str(&pf->hw, err),
11244 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
11245 pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
11246
11247 /* Add a filter to drop all Flow control frames from any VSI from being
11248 * transmitted. By doing so we stop a malicious VF from sending out
11249 * PAUSE or PFC frames and potentially controlling traffic for other
11250 * PF/VF VSIs.
11251 * The FW can still send Flow control frames if enabled.
11252 */
11253 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
11254 pf->main_vsi_seid);
11255
11256 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
11257 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
11258 pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
11259
11260 /* print a string summarizing features */
11261 i40e_print_features(pf);
11262
11263 return 0;
11264
11265 /* Unwind what we've done if something failed in the setup */
11266 err_vsis:
11267 set_bit(__I40E_DOWN, &pf->state);
11268 i40e_clear_interrupt_scheme(pf);
11269 kfree(pf->vsi);
11270 err_switch_setup:
11271 i40e_reset_interrupt_capability(pf);
11272 del_timer_sync(&pf->service_timer);
11273 err_mac_addr:
11274 err_configure_lan_hmc:
11275 (void)i40e_shutdown_lan_hmc(hw);
11276 err_init_lan_hmc:
11277 kfree(pf->qp_pile);
11278 err_sw_init:
11279 err_adminq_setup:
11280 err_pf_reset:
11281 iounmap(hw->hw_addr);
11282 err_ioremap:
11283 kfree(pf);
11284 err_pf_alloc:
11285 pci_disable_pcie_error_reporting(pdev);
11286 pci_release_selected_regions(pdev,
11287 pci_select_bars(pdev, IORESOURCE_MEM));
11288 err_pci_reg:
11289 err_dma:
11290 pci_disable_device(pdev);
11291 return err;
11292 }
11293
11294 /**
11295 * i40e_remove - Device removal routine
11296 * @pdev: PCI device information struct
11297 *
11298 * i40e_remove is called by the PCI subsystem to alert the driver
11299 * that it should release a PCI device. This could be caused by a
11300 * Hot-Plug event, or because the driver is going to be removed from
11301 * memory.
11302 **/
11303 static void i40e_remove(struct pci_dev *pdev)
11304 {
11305 struct i40e_pf *pf = pci_get_drvdata(pdev);
11306 struct i40e_hw *hw = &pf->hw;
11307 i40e_status ret_code;
11308 int i;
11309
11310 i40e_dbg_pf_exit(pf);
11311
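/* stop the PTP clock before tearing down the rest of the device */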
11312 i40e_ptp_stop(pf);
11313
11314 /* Disable RSS in hw */
11315 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
11316 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
11317
11318 /* no more scheduling of any task */
11319 set_bit(__I40E_SUSPENDED, &pf->state);
11320 set_bit(__I40E_DOWN, &pf->state);
11321 if (pf->service_timer.data)
11322 del_timer_sync(&pf->service_timer);
11323 if (pf->service_task.func)
11324 cancel_work_sync(&pf->service_task);
11325
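/* release any VFs that were allocated on behalf of this PF */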
11326 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
11327 i40e_free_vfs(pf);
11328 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
11329 }
11330
11331 i40e_fdir_teardown(pf);
11332
11333 /* If there is a switch structure or any orphans, remove them.
11334 * This will leave only the PF's VSI remaining.
11335 */
11336 for (i = 0; i < I40E_MAX_VEB; i++) {
11337 if (!pf->veb[i])
11338 continue;
11339
11340 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
11341 pf->veb[i]->uplink_seid == 0)
11342 i40e_switch_branch_release(pf->veb[i]);
11343 }
11344
11345 /* Now we can shutdown the PF's VSI, just before we kill
11346 * adminq and hmc.
11347 */
11348 if (pf->vsi[pf->lan_vsi])
11349 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
11350
11351 /* remove attached clients */
11352 ret_code = i40e_lan_del_device(pf);
11353 if (ret_code) {
11354 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
11355 ret_code);
11356 }
11357
11358 /* shutdown and destroy the HMC */
11359 if (hw->hmc.hmc_obj) {
11360 ret_code = i40e_shutdown_lan_hmc(hw);
11361 if (ret_code)
11362 dev_warn(&pdev->dev,
11363 "Failed to destroy the HMC resources: %d\n",
11364 ret_code);
11365 }
11366
11367 /* shutdown the adminq */
11368 ret_code = i40e_shutdown_adminq(hw);
11369 if (ret_code)
11370 dev_warn(&pdev->dev,
11371 "Failed to destroy the Admin Queue resources: %d\n",
11372 ret_code);
11373
11374 /* destroy the locks only once, here */
11375 mutex_destroy(&hw->aq.arq_mutex);
11376 mutex_destroy(&hw->aq.asq_mutex);
11377
11378 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
11379 i40e_clear_interrupt_scheme(pf);
11380 for (i = 0; i < pf->num_alloc_vsi; i++) {
11381 if (pf->vsi[i]) {
11382 i40e_vsi_clear_rings(pf->vsi[i]);
11383 i40e_vsi_clear(pf->vsi[i]);
11384 pf->vsi[i] = NULL;
11385 }
11386 }
11387
11388 for (i = 0; i < I40E_MAX_VEB; i++) {
11389 kfree(pf->veb[i]);
11390 pf->veb[i] = NULL;
11391 }
11392
11393 kfree(pf->qp_pile);
11394 kfree(pf->vsi);
11395
11396 iounmap(hw->hw_addr);
11397 kfree(pf);
11398 pci_release_selected_regions(pdev,
11399 pci_select_bars(pdev, IORESOURCE_MEM));
11400
11401 pci_disable_pcie_error_reporting(pdev);
11402 pci_disable_device(pdev);
11403 }
11404
11405 /**
11406 * i40e_pci_error_detected - warning that something funky happened in PCI land
11407 * @pdev: PCI device information struct
11408 *
11409 * Called to warn that something happened and the error handling steps
11410 * are in progress. Allows the driver to quiesce things and be ready
11411 * for remediation.
11412 **/
11413 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
11414 enum pci_channel_state error)
11415 {
11416 struct i40e_pf *pf = pci_get_drvdata(pdev);
11417
11418 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
11419
11420 /* shutdown all operations */
11421 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
11422 rtnl_lock();
11423 i40e_prep_for_reset(pf);
11424 rtnl_unlock();
11425 }
11426
11427 /* Request a slot reset */
11428 return PCI_ERS_RESULT_NEED_RESET;
11429 }
11430
11431 /**
11432 * i40e_pci_error_slot_reset - a PCI slot reset just happened
11433 * @pdev: PCI device information struct
11434 *
11435 * Called to find if the driver can work with the device now that
11436 * the pci slot has been reset. If a basic connection seems good
11437 * (registers are readable and have sane content) then return a
11438 * happy little PCI_ERS_RESULT_xxx.
11439 **/
11440 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
11441 {
11442 struct i40e_pf *pf = pci_get_drvdata(pdev);
11443 pci_ers_result_t result;
11444 int err;
11445 u32 reg;
11446
11447 dev_dbg(&pdev->dev, "%s\n", __func__);
11448 if (pci_enable_device_mem(pdev)) {
11449 dev_info(&pdev->dev,
11450 "Cannot re-enable PCI device after reset.\n");
11451 result = PCI_ERS_RESULT_DISCONNECT;
11452 } else {
11453 pci_set_master(pdev);
11454 pci_restore_state(pdev);
11455 pci_save_state(pdev);
11456 pci_wake_from_d3(pdev, false);
11457
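/* a zeroed global reset trigger register means the device came back in a sane state */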
11458 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
11459 if (reg == 0)
11460 result = PCI_ERS_RESULT_RECOVERED;
11461 else
11462 result = PCI_ERS_RESULT_DISCONNECT;
11463 }
11464
11465 err = pci_cleanup_aer_uncorrect_error_status(pdev);
11466 if (err) {
11467 dev_info(&pdev->dev,
11468 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
11469 err);
11470 /* non-fatal, continue */
11471 }
11472
11473 return result;
11474 }
11475
11476 /**
11477 * i40e_pci_error_resume - restart operations after PCI error recovery
11478 * @pdev: PCI device information struct
11479 *
11480 * Called to allow the driver to bring things back up after PCI error
11481 * and/or reset recovery has finished.
11482 **/
11483 static void i40e_pci_error_resume(struct pci_dev *pdev)
11484 {
11485 struct i40e_pf *pf = pci_get_drvdata(pdev);
11486
11487 dev_dbg(&pdev->dev, "%s\n", __func__);
11488 if (test_bit(__I40E_SUSPENDED, &pf->state))
11489 return;
11490
11491 rtnl_lock();
11492 i40e_handle_reset_warning(pf);
11493 rtnl_unlock();
11494 }
11495
11496 /**
11497 * i40e_shutdown - PCI callback for shutting down
11498 * @pdev: PCI device information struct
11499 **/
11500 static void i40e_shutdown(struct pci_dev *pdev)
11501 {
11502 struct i40e_pf *pf = pci_get_drvdata(pdev);
11503 struct i40e_hw *hw = &pf->hw;
11504
11505 set_bit(__I40E_SUSPENDED, &pf->state);
11506 set_bit(__I40E_DOWN, &pf->state);
11507 rtnl_lock();
11508 i40e_prep_for_reset(pf);
11509 rtnl_unlock();
11510
11511 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11512 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11513
11514 del_timer_sync(&pf->service_timer);
11515 cancel_work_sync(&pf->service_task);
11516 i40e_fdir_teardown(pf);
11517
11518 rtnl_lock();
11519 i40e_prep_for_reset(pf);
11520 rtnl_unlock();
11521
11522 wr32(hw, I40E_PFPM_APM,
11523 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11524 wr32(hw, I40E_PFPM_WUFC,
11525 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11526
11527 i40e_clear_interrupt_scheme(pf);
11528
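/* on a real power-off, arm Wake-on-LAN as configured and drop the device into D3hot */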
11529 if (system_state == SYSTEM_POWER_OFF) {
11530 pci_wake_from_d3(pdev, pf->wol_en);
11531 pci_set_power_state(pdev, PCI_D3hot);
11532 }
11533 }
11534
11535 #ifdef CONFIG_PM
11536 /**
11537 * i40e_suspend - PCI callback for moving to D3
11538 * @pdev: PCI device information struct
11539 **/
11540 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11541 {
11542 struct i40e_pf *pf = pci_get_drvdata(pdev);
11543 struct i40e_hw *hw = &pf->hw;
11544
11545 set_bit(__I40E_SUSPENDED, &pf->state);
11546 set_bit(__I40E_DOWN, &pf->state);
11547
11548 rtnl_lock();
11549 i40e_prep_for_reset(pf);
11550 rtnl_unlock();
11551
11552 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11553 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11554
11555 pci_wake_from_d3(pdev, pf->wol_en);
11556 pci_set_power_state(pdev, PCI_D3hot);
11557
11558 return 0;
11559 }
11560
11561 /**
11562 * i40e_resume - PCI callback for waking up from D3
11563 * @pdev: PCI device information struct
11564 **/
11565 static int i40e_resume(struct pci_dev *pdev)
11566 {
11567 struct i40e_pf *pf = pci_get_drvdata(pdev);
11568 u32 err;
11569
11570 pci_set_power_state(pdev, PCI_D0);
11571 pci_restore_state(pdev);
11572 /* pci_restore_state() clears dev->state_saved, so
11573 * call pci_save_state() again to restore it.
11574 */
11575 pci_save_state(pdev);
11576
11577 err = pci_enable_device_mem(pdev);
11578 if (err) {
11579 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11580 return err;
11581 }
11582 pci_set_master(pdev);
11583
11584 /* no wakeup events while running */
11585 pci_wake_from_d3(pdev, false);
11586
11587 /* handling the reset will rebuild the device state */
11588 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11589 clear_bit(__I40E_DOWN, &pf->state);
11590 rtnl_lock();
11591 i40e_reset_and_rebuild(pf, false);
11592 rtnl_unlock();
11593 }
11594
11595 return 0;
11596 }
11597
11598 #endif
11599 static const struct pci_error_handlers i40e_err_handler = {
11600 .error_detected = i40e_pci_error_detected,
11601 .slot_reset = i40e_pci_error_slot_reset,
11602 .resume = i40e_pci_error_resume,
11603 };
11604
11605 static struct pci_driver i40e_driver = {
11606 .name = i40e_driver_name,
11607 .id_table = i40e_pci_tbl,
11608 .probe = i40e_probe,
11609 .remove = i40e_remove,
11610 #ifdef CONFIG_PM
11611 .suspend = i40e_suspend,
11612 .resume = i40e_resume,
11613 #endif
11614 .shutdown = i40e_shutdown,
11615 .err_handler = &i40e_err_handler,
11616 .sriov_configure = i40e_pci_sriov_configure,
11617 };
11618
11619 /**
11620 * i40e_init_module - Driver registration routine
11621 *
11622 * i40e_init_module is the first routine called when the driver is
11623 * loaded. All it does is register with the PCI subsystem.
11624 **/
11625 static int __init i40e_init_module(void)
11626 {
11627 pr_info("%s: %s - version %s\n", i40e_driver_name,
11628 i40e_driver_string, i40e_driver_version_str);
11629 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11630
11631 /* we will see if a single thread per module is enough for now;
11632 * it can't be any worse than using the system workqueue, which
11633 * was already single threaded
11634 */
11635 i40e_wq = create_singlethread_workqueue(i40e_driver_name);
11636 if (!i40e_wq) {
11637 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
11638 return -ENOMEM;
11639 }
11640
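/* create the driver-wide debugfs root before handing registration off to the PCI core */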
11641 i40e_dbg_init();
11642 return pci_register_driver(&i40e_driver);
11643 }
11644 module_init(i40e_init_module);
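/* Typical usage: `modprobe i40e` loads the module; the PCI core then calls
 * i40e_probe() for every device that matches an entry in i40e_pci_tbl. */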
11645
11646 /**
11647 * i40e_exit_module - Driver exit cleanup routine
11648 *
11649 * i40e_exit_module is called just before the driver is removed
11650 * from memory.
11651 **/
11652 static void __exit i40e_exit_module(void)
11653 {
11654 pci_unregister_driver(&i40e_driver);
11655 destroy_workqueue(i40e_wq);
11656 i40e_dbg_exit();
11657 }
11658 module_exit(i40e_exit_module);