1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2015 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36 "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 3
42 #define DRV_VERSION_BUILD 21
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
   49 /* a few forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62 *
63 * Last entry must be all 0s
64 *
65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66 * Class, Class Mask, private data (not used) }
67 */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
84 /* required last entry */
85 {0, }
86 };
87 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
88
89 #define I40E_MAX_VF_COUNT 128
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
93
94 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
95 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
96 MODULE_LICENSE("GPL");
97 MODULE_VERSION(DRV_VERSION);
98
99 /**
100 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
101 * @hw: pointer to the HW structure
102 * @mem: ptr to mem struct to fill out
103 * @size: size of memory requested
104 * @alignment: what to align the allocation to
105 **/
106 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
107 u64 size, u32 alignment)
108 {
109 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
110
111 mem->size = ALIGN(size, alignment);
112 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
113 &mem->pa, GFP_KERNEL);
114 if (!mem->va)
115 return -ENOMEM;
116
117 return 0;
118 }
119
120 /**
121 * i40e_free_dma_mem_d - OS specific memory free for shared code
122 * @hw: pointer to the HW structure
123 * @mem: ptr to mem struct to free
124 **/
125 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
126 {
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
128
129 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
130 mem->va = NULL;
131 mem->pa = 0;
132 mem->size = 0;
133
134 return 0;
135 }
136
137 /**
138 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
139 * @hw: pointer to the HW structure
140 * @mem: ptr to mem struct to fill out
141 * @size: size of memory requested
142 **/
143 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
144 u32 size)
145 {
146 mem->size = size;
147 mem->va = kzalloc(size, GFP_KERNEL);
148
149 if (!mem->va)
150 return -ENOMEM;
151
152 return 0;
153 }
154
155 /**
156 * i40e_free_virt_mem_d - OS specific memory free for shared code
157 * @hw: pointer to the HW structure
158 * @mem: ptr to mem struct to free
159 **/
160 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
161 {
162 /* it's ok to kfree a NULL pointer */
163 kfree(mem->va);
164 mem->va = NULL;
165 mem->size = 0;
166
167 return 0;
168 }
169
170 /**
171 * i40e_get_lump - find a lump of free generic resource
172 * @pf: board private structure
173 * @pile: the pile of resource to search
174 * @needed: the number of items needed
175 * @id: an owner id to stick on the items assigned
176 *
177 * Returns the base item index of the lump, or negative for error
178 *
179 * The search_hint trick and lack of advanced fit-finding only work
180 * because we're highly likely to have all the same size lump requests.
181 * Linear search time and any fragmentation should be minimal.
182 **/
183 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
184 u16 needed, u16 id)
185 {
186 int ret = -ENOMEM;
187 int i, j;
188
189 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
190 dev_info(&pf->pdev->dev,
191 "param err: pile=%p needed=%d id=0x%04x\n",
192 pile, needed, id);
193 return -EINVAL;
194 }
195
196 /* start the linear search with an imperfect hint */
197 i = pile->search_hint;
198 while (i < pile->num_entries) {
199 /* skip already allocated entries */
200 if (pile->list[i] & I40E_PILE_VALID_BIT) {
201 i++;
202 continue;
203 }
204
205 /* do we have enough in this lump? */
206 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
207 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
208 break;
209 }
210
211 if (j == needed) {
212 /* there was enough, so assign it to the requestor */
213 for (j = 0; j < needed; j++)
214 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
215 ret = i;
216 pile->search_hint = i + j;
217 break;
218 } else {
219 /* not enough, so skip over it and continue looking */
220 i += j;
221 }
222 }
223
224 return ret;
225 }
226
227 /**
228 * i40e_put_lump - return a lump of generic resource
229 * @pile: the pile of resource to search
230 * @index: the base item index
231 * @id: the owner id of the items assigned
232 *
233 * Returns the count of items in the lump
234 **/
235 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
236 {
237 int valid_id = (id | I40E_PILE_VALID_BIT);
238 int count = 0;
239 int i;
240
241 if (!pile || index >= pile->num_entries)
242 return -EINVAL;
243
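	/* clear each consecutive entry that still carries this owner id;
	 * stop at the first entry that is free or owned by someone else
	 */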
244 for (i = index;
245 i < pile->num_entries && pile->list[i] == valid_id;
246 i++) {
247 pile->list[i] = 0;
248 count++;
249 }
250
251 if (count && index < pile->search_hint)
252 pile->search_hint = index;
253
254 return count;
255 }
256
257 /**
258 * i40e_find_vsi_from_id - searches for the vsi with the given id
  259  * @pf: the pf structure to search for the vsi
  260  * @id: id of the vsi it is searching for
261 **/
262 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
263 {
264 int i;
265
266 for (i = 0; i < pf->num_alloc_vsi; i++)
267 if (pf->vsi[i] && (pf->vsi[i]->id == id))
268 return pf->vsi[i];
269
270 return NULL;
271 }
272
273 /**
274 * i40e_service_event_schedule - Schedule the service task to wake up
275 * @pf: board private structure
276 *
277 * If not already scheduled, this puts the task into the work queue
278 **/
279 static void i40e_service_event_schedule(struct i40e_pf *pf)
280 {
281 if (!test_bit(__I40E_DOWN, &pf->state) &&
282 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
283 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
284 schedule_work(&pf->service_task);
285 }
286
287 /**
288 * i40e_tx_timeout - Respond to a Tx Hang
289 * @netdev: network interface device structure
290 *
291 * If any port has noticed a Tx timeout, it is likely that the whole
292 * device is munged, not just the one netdev port, so go for the full
293 * reset.
294 **/
295 #ifdef I40E_FCOE
296 void i40e_tx_timeout(struct net_device *netdev)
297 #else
298 static void i40e_tx_timeout(struct net_device *netdev)
299 #endif
300 {
301 struct i40e_netdev_priv *np = netdev_priv(netdev);
302 struct i40e_vsi *vsi = np->vsi;
303 struct i40e_pf *pf = vsi->back;
304 struct i40e_ring *tx_ring = NULL;
305 unsigned int i, hung_queue = 0;
306 u32 head, val;
307
308 pf->tx_timeout_count++;
309
310 /* find the stopped queue the same way the stack does */
311 for (i = 0; i < netdev->num_tx_queues; i++) {
312 struct netdev_queue *q;
313 unsigned long trans_start;
314
315 q = netdev_get_tx_queue(netdev, i);
316 trans_start = q->trans_start ? : netdev->trans_start;
317 if (netif_xmit_stopped(q) &&
318 time_after(jiffies,
319 (trans_start + netdev->watchdog_timeo))) {
320 hung_queue = i;
321 break;
322 }
323 }
324
325 if (i == netdev->num_tx_queues) {
326 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
327 } else {
328 /* now that we have an index, find the tx_ring struct */
329 for (i = 0; i < vsi->num_queue_pairs; i++) {
330 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
331 if (hung_queue ==
332 vsi->tx_rings[i]->queue_index) {
333 tx_ring = vsi->tx_rings[i];
334 break;
335 }
336 }
337 }
338 }
339
340 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
341 pf->tx_timeout_recovery_level = 1; /* reset after some time */
342 else if (time_before(jiffies,
343 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
344 return; /* don't do any new action before the next timeout */
345
346 if (tx_ring) {
347 head = i40e_get_head(tx_ring);
348 /* Read interrupt register */
349 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
350 val = rd32(&pf->hw,
351 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
352 tx_ring->vsi->base_vector - 1));
353 else
354 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
355
356 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
357 vsi->seid, hung_queue, tx_ring->next_to_clean,
358 head, tx_ring->next_to_use,
359 readl(tx_ring->tail), val);
360 }
361
362 pf->tx_timeout_last_recovery = jiffies;
363 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
364 pf->tx_timeout_recovery_level, hung_queue);
365
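	/* escalate the recovery with each repeated timeout: first a PF
	 * reset, then a core reset, then a global reset; the level drops
	 * back to 1 once 20 seconds pass since the last recovery attempt
	 */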
366 switch (pf->tx_timeout_recovery_level) {
367 case 1:
368 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
369 break;
370 case 2:
371 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
372 break;
373 case 3:
374 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
375 break;
376 default:
377 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
378 break;
379 }
380
381 i40e_service_event_schedule(pf);
382 pf->tx_timeout_recovery_level++;
383 }
384
385 /**
  386  * i40e_release_rx_desc - Store the new tail value
  387  * @rx_ring: ring to bump
  388  * @val: new tail (next_to_use) index
389 **/
390 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
391 {
392 rx_ring->next_to_use = val;
393
394 /* Force memory writes to complete before letting h/w
395 * know there are new descriptors to fetch. (Only
396 * applicable for weak-ordered memory model archs,
397 * such as IA-64).
398 */
399 wmb();
400 writel(val, rx_ring->tail);
401 }
402
403 /**
404 * i40e_get_vsi_stats_struct - Get System Network Statistics
405 * @vsi: the VSI we care about
406 *
407 * Returns the address of the device statistics structure.
408 * The statistics are actually updated from the service task.
409 **/
410 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
411 {
412 return &vsi->net_stats;
413 }
414
415 /**
416 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
417 * @netdev: network interface device structure
418 *
419 * Returns the address of the device statistics structure.
420 * The statistics are actually updated from the service task.
421 **/
422 #ifdef I40E_FCOE
423 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
424 struct net_device *netdev,
425 struct rtnl_link_stats64 *stats)
426 #else
427 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
428 struct net_device *netdev,
429 struct rtnl_link_stats64 *stats)
430 #endif
431 {
432 struct i40e_netdev_priv *np = netdev_priv(netdev);
433 struct i40e_ring *tx_ring, *rx_ring;
434 struct i40e_vsi *vsi = np->vsi;
435 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
436 int i;
437
438 if (test_bit(__I40E_DOWN, &vsi->state))
439 return stats;
440
441 if (!vsi->tx_rings)
442 return stats;
443
444 rcu_read_lock();
445 for (i = 0; i < vsi->num_queue_pairs; i++) {
446 u64 bytes, packets;
447 unsigned int start;
448
449 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
450 if (!tx_ring)
451 continue;
452
453 do {
454 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
455 packets = tx_ring->stats.packets;
456 bytes = tx_ring->stats.bytes;
457 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
458
459 stats->tx_packets += packets;
460 stats->tx_bytes += bytes;
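		/* the Rx ring lives directly after its Tx ring in the same
		 * allocated block, so step one element past the Tx ring to
		 * reach the paired Rx ring
		 */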
461 rx_ring = &tx_ring[1];
462
463 do {
464 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
465 packets = rx_ring->stats.packets;
466 bytes = rx_ring->stats.bytes;
467 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
468
469 stats->rx_packets += packets;
470 stats->rx_bytes += bytes;
471 }
472 rcu_read_unlock();
473
474 /* following stats updated by i40e_watchdog_subtask() */
475 stats->multicast = vsi_stats->multicast;
476 stats->tx_errors = vsi_stats->tx_errors;
477 stats->tx_dropped = vsi_stats->tx_dropped;
478 stats->rx_errors = vsi_stats->rx_errors;
479 stats->rx_dropped = vsi_stats->rx_dropped;
480 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
481 stats->rx_length_errors = vsi_stats->rx_length_errors;
482
483 return stats;
484 }
485
486 /**
487 * i40e_vsi_reset_stats - Resets all stats of the given vsi
488 * @vsi: the VSI to have its stats reset
489 **/
490 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
491 {
492 struct rtnl_link_stats64 *ns;
493 int i;
494
495 if (!vsi)
496 return;
497
498 ns = i40e_get_vsi_stats_struct(vsi);
499 memset(ns, 0, sizeof(*ns));
500 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
501 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
502 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
503 if (vsi->rx_rings && vsi->rx_rings[0]) {
504 for (i = 0; i < vsi->num_queue_pairs; i++) {
  505 			memset(&vsi->rx_rings[i]->stats, 0,
  506 			       sizeof(vsi->rx_rings[i]->stats));
  507 			memset(&vsi->rx_rings[i]->rx_stats, 0,
  508 			       sizeof(vsi->rx_rings[i]->rx_stats));
  509 			memset(&vsi->tx_rings[i]->stats, 0,
510 sizeof(vsi->tx_rings[i]->stats));
511 memset(&vsi->tx_rings[i]->tx_stats, 0,
512 sizeof(vsi->tx_rings[i]->tx_stats));
513 }
514 }
515 vsi->stat_offsets_loaded = false;
516 }
517
518 /**
519 * i40e_pf_reset_stats - Reset all of the stats for the given PF
520 * @pf: the PF to be reset
521 **/
522 void i40e_pf_reset_stats(struct i40e_pf *pf)
523 {
524 int i;
525
526 memset(&pf->stats, 0, sizeof(pf->stats));
527 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
528 pf->stat_offsets_loaded = false;
529
530 for (i = 0; i < I40E_MAX_VEB; i++) {
531 if (pf->veb[i]) {
532 memset(&pf->veb[i]->stats, 0,
533 sizeof(pf->veb[i]->stats));
534 memset(&pf->veb[i]->stats_offsets, 0,
535 sizeof(pf->veb[i]->stats_offsets));
536 pf->veb[i]->stat_offsets_loaded = false;
537 }
538 }
539 }
540
541 /**
542 * i40e_stat_update48 - read and update a 48 bit stat from the chip
543 * @hw: ptr to the hardware info
544 * @hireg: the high 32 bit reg to read
545 * @loreg: the low 32 bit reg to read
546 * @offset_loaded: has the initial offset been loaded yet
547 * @offset: ptr to current offset value
548 * @stat: ptr to the stat
549 *
550 * Since the device stats are not reset at PFReset, they likely will not
551 * be zeroed when the driver starts. We'll save the first values read
552 * and use them as offsets to be subtracted from the raw values in order
553 * to report stats that count from zero. In the process, we also manage
554 * the potential roll-over.
555 **/
556 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
557 bool offset_loaded, u64 *offset, u64 *stat)
558 {
559 u64 new_data;
560
561 if (hw->device_id == I40E_DEV_ID_QEMU) {
562 new_data = rd32(hw, loreg);
563 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
564 } else {
565 new_data = rd64(hw, loreg);
566 }
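	/* The hardware counter is only 48 bits wide, so when the current
	 * reading is below the saved offset the counter has wrapped;
	 * adding BIT_ULL(48) before subtracting keeps the delta positive,
	 * and the final mask trims the result back to 48 bits.
	 */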
567 if (!offset_loaded)
568 *offset = new_data;
569 if (likely(new_data >= *offset))
570 *stat = new_data - *offset;
571 else
572 *stat = (new_data + BIT_ULL(48)) - *offset;
573 *stat &= 0xFFFFFFFFFFFFULL;
574 }
575
576 /**
577 * i40e_stat_update32 - read and update a 32 bit stat from the chip
578 * @hw: ptr to the hardware info
579 * @reg: the hw reg to read
580 * @offset_loaded: has the initial offset been loaded yet
581 * @offset: ptr to current offset value
582 * @stat: ptr to the stat
583 **/
584 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
585 bool offset_loaded, u64 *offset, u64 *stat)
586 {
587 u32 new_data;
588
589 new_data = rd32(hw, reg);
590 if (!offset_loaded)
591 *offset = new_data;
592 if (likely(new_data >= *offset))
593 *stat = (u32)(new_data - *offset);
594 else
595 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
596 }
597
598 /**
599 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
600 * @vsi: the VSI to be updated
601 **/
602 void i40e_update_eth_stats(struct i40e_vsi *vsi)
603 {
604 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
605 struct i40e_pf *pf = vsi->back;
606 struct i40e_hw *hw = &pf->hw;
607 struct i40e_eth_stats *oes;
608 struct i40e_eth_stats *es; /* device's eth stats */
609
610 es = &vsi->eth_stats;
611 oes = &vsi->eth_stats_offsets;
612
613 /* Gather up the stats that the hw collects */
614 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
615 vsi->stat_offsets_loaded,
616 &oes->tx_errors, &es->tx_errors);
617 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
618 vsi->stat_offsets_loaded,
619 &oes->rx_discards, &es->rx_discards);
620 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
621 vsi->stat_offsets_loaded,
622 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
623 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
624 vsi->stat_offsets_loaded,
625 &oes->tx_errors, &es->tx_errors);
626
627 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
628 I40E_GLV_GORCL(stat_idx),
629 vsi->stat_offsets_loaded,
630 &oes->rx_bytes, &es->rx_bytes);
631 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
632 I40E_GLV_UPRCL(stat_idx),
633 vsi->stat_offsets_loaded,
634 &oes->rx_unicast, &es->rx_unicast);
635 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
636 I40E_GLV_MPRCL(stat_idx),
637 vsi->stat_offsets_loaded,
638 &oes->rx_multicast, &es->rx_multicast);
639 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
640 I40E_GLV_BPRCL(stat_idx),
641 vsi->stat_offsets_loaded,
642 &oes->rx_broadcast, &es->rx_broadcast);
643
644 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
645 I40E_GLV_GOTCL(stat_idx),
646 vsi->stat_offsets_loaded,
647 &oes->tx_bytes, &es->tx_bytes);
648 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
649 I40E_GLV_UPTCL(stat_idx),
650 vsi->stat_offsets_loaded,
651 &oes->tx_unicast, &es->tx_unicast);
652 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
653 I40E_GLV_MPTCL(stat_idx),
654 vsi->stat_offsets_loaded,
655 &oes->tx_multicast, &es->tx_multicast);
656 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
657 I40E_GLV_BPTCL(stat_idx),
658 vsi->stat_offsets_loaded,
659 &oes->tx_broadcast, &es->tx_broadcast);
660 vsi->stat_offsets_loaded = true;
661 }
662
663 /**
664 * i40e_update_veb_stats - Update Switch component statistics
665 * @veb: the VEB being updated
666 **/
667 static void i40e_update_veb_stats(struct i40e_veb *veb)
668 {
669 struct i40e_pf *pf = veb->pf;
670 struct i40e_hw *hw = &pf->hw;
671 struct i40e_eth_stats *oes;
672 struct i40e_eth_stats *es; /* device's eth stats */
673 struct i40e_veb_tc_stats *veb_oes;
674 struct i40e_veb_tc_stats *veb_es;
675 int i, idx = 0;
676
677 idx = veb->stats_idx;
678 es = &veb->stats;
679 oes = &veb->stats_offsets;
680 veb_es = &veb->tc_stats;
681 veb_oes = &veb->tc_stats_offsets;
682
683 /* Gather up the stats that the hw collects */
684 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
685 veb->stat_offsets_loaded,
686 &oes->tx_discards, &es->tx_discards);
687 if (hw->revision_id > 0)
688 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
689 veb->stat_offsets_loaded,
690 &oes->rx_unknown_protocol,
691 &es->rx_unknown_protocol);
692 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
693 veb->stat_offsets_loaded,
694 &oes->rx_bytes, &es->rx_bytes);
695 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
696 veb->stat_offsets_loaded,
697 &oes->rx_unicast, &es->rx_unicast);
698 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
699 veb->stat_offsets_loaded,
700 &oes->rx_multicast, &es->rx_multicast);
701 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
702 veb->stat_offsets_loaded,
703 &oes->rx_broadcast, &es->rx_broadcast);
704
705 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
706 veb->stat_offsets_loaded,
707 &oes->tx_bytes, &es->tx_bytes);
708 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
709 veb->stat_offsets_loaded,
710 &oes->tx_unicast, &es->tx_unicast);
711 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
712 veb->stat_offsets_loaded,
713 &oes->tx_multicast, &es->tx_multicast);
714 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
715 veb->stat_offsets_loaded,
716 &oes->tx_broadcast, &es->tx_broadcast);
717 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
718 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
719 I40E_GLVEBTC_RPCL(i, idx),
720 veb->stat_offsets_loaded,
721 &veb_oes->tc_rx_packets[i],
722 &veb_es->tc_rx_packets[i]);
723 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
724 I40E_GLVEBTC_RBCL(i, idx),
725 veb->stat_offsets_loaded,
726 &veb_oes->tc_rx_bytes[i],
727 &veb_es->tc_rx_bytes[i]);
728 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
729 I40E_GLVEBTC_TPCL(i, idx),
730 veb->stat_offsets_loaded,
731 &veb_oes->tc_tx_packets[i],
732 &veb_es->tc_tx_packets[i]);
733 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
734 I40E_GLVEBTC_TBCL(i, idx),
735 veb->stat_offsets_loaded,
736 &veb_oes->tc_tx_bytes[i],
737 &veb_es->tc_tx_bytes[i]);
738 }
739 veb->stat_offsets_loaded = true;
740 }
741
742 #ifdef I40E_FCOE
743 /**
744 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
745 * @vsi: the VSI that is capable of doing FCoE
746 **/
747 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
748 {
749 struct i40e_pf *pf = vsi->back;
750 struct i40e_hw *hw = &pf->hw;
751 struct i40e_fcoe_stats *ofs;
  752 	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
753 int idx;
754
755 if (vsi->type != I40E_VSI_FCOE)
756 return;
757
758 idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
759 fs = &vsi->fcoe_stats;
760 ofs = &vsi->fcoe_stats_offsets;
761
762 i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
763 vsi->fcoe_stat_offsets_loaded,
764 &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
765 i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
766 vsi->fcoe_stat_offsets_loaded,
767 &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
768 i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
769 vsi->fcoe_stat_offsets_loaded,
770 &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
771 i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
772 vsi->fcoe_stat_offsets_loaded,
773 &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
774 i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
775 vsi->fcoe_stat_offsets_loaded,
776 &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
777 i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
778 vsi->fcoe_stat_offsets_loaded,
779 &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
780 i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
781 vsi->fcoe_stat_offsets_loaded,
782 &ofs->fcoe_last_error, &fs->fcoe_last_error);
783 i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
784 vsi->fcoe_stat_offsets_loaded,
785 &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
786
787 vsi->fcoe_stat_offsets_loaded = true;
788 }
789
790 #endif
791 /**
792 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
793 * @pf: the corresponding PF
794 *
795 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
796 **/
797 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
798 {
799 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
800 struct i40e_hw_port_stats *nsd = &pf->stats;
801 struct i40e_hw *hw = &pf->hw;
802 u64 xoff = 0;
803
804 if ((hw->fc.current_mode != I40E_FC_FULL) &&
805 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
806 return;
807
808 xoff = nsd->link_xoff_rx;
809 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
810 pf->stat_offsets_loaded,
811 &osd->link_xoff_rx, &nsd->link_xoff_rx);
812
813 /* No new LFC xoff rx */
814 if (!(nsd->link_xoff_rx - xoff))
815 return;
816
817 }
818
819 /**
820 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
821 * @pf: the corresponding PF
822 *
823 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
824 **/
825 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
826 {
827 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
828 struct i40e_hw_port_stats *nsd = &pf->stats;
829 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
830 struct i40e_dcbx_config *dcb_cfg;
831 struct i40e_hw *hw = &pf->hw;
832 u16 i;
833 u8 tc;
834
835 dcb_cfg = &hw->local_dcbx_config;
836
837 /* Collect Link XOFF stats when PFC is disabled */
838 if (!dcb_cfg->pfc.pfcenable) {
839 i40e_update_link_xoff_rx(pf);
840 return;
841 }
842
843 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
844 u64 prio_xoff = nsd->priority_xoff_rx[i];
845 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
846 pf->stat_offsets_loaded,
847 &osd->priority_xoff_rx[i],
848 &nsd->priority_xoff_rx[i]);
849
850 /* No new PFC xoff rx */
851 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
852 continue;
853 /* Get the TC for given priority */
854 tc = dcb_cfg->etscfg.prioritytable[i];
855 xoff[tc] = true;
856 }
857 }
858
859 /**
860 * i40e_update_vsi_stats - Update the vsi statistics counters.
861 * @vsi: the VSI to be updated
862 *
863 * There are a few instances where we store the same stat in a
864 * couple of different structs. This is partly because we have
865 * the netdev stats that need to be filled out, which is slightly
866 * different from the "eth_stats" defined by the chip and used in
867 * VF communications. We sort it out here.
868 **/
869 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
870 {
871 struct i40e_pf *pf = vsi->back;
872 struct rtnl_link_stats64 *ons;
873 struct rtnl_link_stats64 *ns; /* netdev stats */
874 struct i40e_eth_stats *oes;
875 struct i40e_eth_stats *es; /* device's eth stats */
876 u32 tx_restart, tx_busy;
877 struct i40e_ring *p;
878 u32 rx_page, rx_buf;
879 u64 bytes, packets;
880 unsigned int start;
881 u64 rx_p, rx_b;
882 u64 tx_p, tx_b;
883 u16 q;
884
885 if (test_bit(__I40E_DOWN, &vsi->state) ||
886 test_bit(__I40E_CONFIG_BUSY, &pf->state))
887 return;
888
889 ns = i40e_get_vsi_stats_struct(vsi);
890 ons = &vsi->net_stats_offsets;
891 es = &vsi->eth_stats;
892 oes = &vsi->eth_stats_offsets;
893
894 /* Gather up the netdev and vsi stats that the driver collects
895 * on the fly during packet processing
896 */
897 rx_b = rx_p = 0;
898 tx_b = tx_p = 0;
899 tx_restart = tx_busy = 0;
900 rx_page = 0;
901 rx_buf = 0;
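	/* the ring pointers can change under us, so read each one once
	 * under RCU and use the u64_stats seqcount to snapshot a
	 * consistent packets/bytes pair per ring
	 */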
902 rcu_read_lock();
903 for (q = 0; q < vsi->num_queue_pairs; q++) {
904 /* locate Tx ring */
905 p = ACCESS_ONCE(vsi->tx_rings[q]);
906
907 do {
908 start = u64_stats_fetch_begin_irq(&p->syncp);
909 packets = p->stats.packets;
910 bytes = p->stats.bytes;
911 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
912 tx_b += bytes;
913 tx_p += packets;
914 tx_restart += p->tx_stats.restart_queue;
915 tx_busy += p->tx_stats.tx_busy;
916
917 /* Rx queue is part of the same block as Tx queue */
918 p = &p[1];
919 do {
920 start = u64_stats_fetch_begin_irq(&p->syncp);
921 packets = p->stats.packets;
922 bytes = p->stats.bytes;
923 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
924 rx_b += bytes;
925 rx_p += packets;
926 rx_buf += p->rx_stats.alloc_buff_failed;
927 rx_page += p->rx_stats.alloc_page_failed;
928 }
929 rcu_read_unlock();
930 vsi->tx_restart = tx_restart;
931 vsi->tx_busy = tx_busy;
932 vsi->rx_page_failed = rx_page;
933 vsi->rx_buf_failed = rx_buf;
934
935 ns->rx_packets = rx_p;
936 ns->rx_bytes = rx_b;
937 ns->tx_packets = tx_p;
938 ns->tx_bytes = tx_b;
939
940 /* update netdev stats from eth stats */
941 i40e_update_eth_stats(vsi);
942 ons->tx_errors = oes->tx_errors;
943 ns->tx_errors = es->tx_errors;
944 ons->multicast = oes->rx_multicast;
945 ns->multicast = es->rx_multicast;
946 ons->rx_dropped = oes->rx_discards;
947 ns->rx_dropped = es->rx_discards;
948 ons->tx_dropped = oes->tx_discards;
949 ns->tx_dropped = es->tx_discards;
950
951 /* pull in a couple PF stats if this is the main vsi */
952 if (vsi == pf->vsi[pf->lan_vsi]) {
953 ns->rx_crc_errors = pf->stats.crc_errors;
954 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
955 ns->rx_length_errors = pf->stats.rx_length_errors;
956 }
957 }
958
959 /**
960 * i40e_update_pf_stats - Update the PF statistics counters.
961 * @pf: the PF to be updated
962 **/
963 static void i40e_update_pf_stats(struct i40e_pf *pf)
964 {
965 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
966 struct i40e_hw_port_stats *nsd = &pf->stats;
967 struct i40e_hw *hw = &pf->hw;
968 u32 val;
969 int i;
970
971 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
972 I40E_GLPRT_GORCL(hw->port),
973 pf->stat_offsets_loaded,
974 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
975 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
976 I40E_GLPRT_GOTCL(hw->port),
977 pf->stat_offsets_loaded,
978 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
979 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
980 pf->stat_offsets_loaded,
981 &osd->eth.rx_discards,
982 &nsd->eth.rx_discards);
983 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
984 I40E_GLPRT_UPRCL(hw->port),
985 pf->stat_offsets_loaded,
986 &osd->eth.rx_unicast,
987 &nsd->eth.rx_unicast);
988 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
989 I40E_GLPRT_MPRCL(hw->port),
990 pf->stat_offsets_loaded,
991 &osd->eth.rx_multicast,
992 &nsd->eth.rx_multicast);
993 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
994 I40E_GLPRT_BPRCL(hw->port),
995 pf->stat_offsets_loaded,
996 &osd->eth.rx_broadcast,
997 &nsd->eth.rx_broadcast);
998 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
999 I40E_GLPRT_UPTCL(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->eth.tx_unicast,
1002 &nsd->eth.tx_unicast);
1003 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1004 I40E_GLPRT_MPTCL(hw->port),
1005 pf->stat_offsets_loaded,
1006 &osd->eth.tx_multicast,
1007 &nsd->eth.tx_multicast);
1008 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1009 I40E_GLPRT_BPTCL(hw->port),
1010 pf->stat_offsets_loaded,
1011 &osd->eth.tx_broadcast,
1012 &nsd->eth.tx_broadcast);
1013
1014 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1015 pf->stat_offsets_loaded,
1016 &osd->tx_dropped_link_down,
1017 &nsd->tx_dropped_link_down);
1018
1019 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->crc_errors, &nsd->crc_errors);
1022
1023 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1024 pf->stat_offsets_loaded,
1025 &osd->illegal_bytes, &nsd->illegal_bytes);
1026
1027 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1028 pf->stat_offsets_loaded,
1029 &osd->mac_local_faults,
1030 &nsd->mac_local_faults);
1031 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
1032 pf->stat_offsets_loaded,
1033 &osd->mac_remote_faults,
1034 &nsd->mac_remote_faults);
1035
1036 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
1037 pf->stat_offsets_loaded,
1038 &osd->rx_length_errors,
1039 &nsd->rx_length_errors);
1040
1041 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->link_xon_rx, &nsd->link_xon_rx);
1044 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->link_xon_tx, &nsd->link_xon_tx);
1047 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
1048 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1049 pf->stat_offsets_loaded,
1050 &osd->link_xoff_tx, &nsd->link_xoff_tx);
1051
1052 for (i = 0; i < 8; i++) {
1053 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1054 pf->stat_offsets_loaded,
1055 &osd->priority_xon_rx[i],
1056 &nsd->priority_xon_rx[i]);
1057 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1058 pf->stat_offsets_loaded,
1059 &osd->priority_xon_tx[i],
1060 &nsd->priority_xon_tx[i]);
1061 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1062 pf->stat_offsets_loaded,
1063 &osd->priority_xoff_tx[i],
1064 &nsd->priority_xoff_tx[i]);
1065 i40e_stat_update32(hw,
1066 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1067 pf->stat_offsets_loaded,
1068 &osd->priority_xon_2_xoff[i],
1069 &nsd->priority_xon_2_xoff[i]);
1070 }
1071
1072 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1073 I40E_GLPRT_PRC64L(hw->port),
1074 pf->stat_offsets_loaded,
1075 &osd->rx_size_64, &nsd->rx_size_64);
1076 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1077 I40E_GLPRT_PRC127L(hw->port),
1078 pf->stat_offsets_loaded,
1079 &osd->rx_size_127, &nsd->rx_size_127);
1080 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1081 I40E_GLPRT_PRC255L(hw->port),
1082 pf->stat_offsets_loaded,
1083 &osd->rx_size_255, &nsd->rx_size_255);
1084 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1085 I40E_GLPRT_PRC511L(hw->port),
1086 pf->stat_offsets_loaded,
1087 &osd->rx_size_511, &nsd->rx_size_511);
1088 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1089 I40E_GLPRT_PRC1023L(hw->port),
1090 pf->stat_offsets_loaded,
1091 &osd->rx_size_1023, &nsd->rx_size_1023);
1092 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1093 I40E_GLPRT_PRC1522L(hw->port),
1094 pf->stat_offsets_loaded,
1095 &osd->rx_size_1522, &nsd->rx_size_1522);
1096 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1097 I40E_GLPRT_PRC9522L(hw->port),
1098 pf->stat_offsets_loaded,
1099 &osd->rx_size_big, &nsd->rx_size_big);
1100
1101 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1102 I40E_GLPRT_PTC64L(hw->port),
1103 pf->stat_offsets_loaded,
1104 &osd->tx_size_64, &nsd->tx_size_64);
1105 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1106 I40E_GLPRT_PTC127L(hw->port),
1107 pf->stat_offsets_loaded,
1108 &osd->tx_size_127, &nsd->tx_size_127);
1109 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1110 I40E_GLPRT_PTC255L(hw->port),
1111 pf->stat_offsets_loaded,
1112 &osd->tx_size_255, &nsd->tx_size_255);
1113 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1114 I40E_GLPRT_PTC511L(hw->port),
1115 pf->stat_offsets_loaded,
1116 &osd->tx_size_511, &nsd->tx_size_511);
1117 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1118 I40E_GLPRT_PTC1023L(hw->port),
1119 pf->stat_offsets_loaded,
1120 &osd->tx_size_1023, &nsd->tx_size_1023);
1121 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1122 I40E_GLPRT_PTC1522L(hw->port),
1123 pf->stat_offsets_loaded,
1124 &osd->tx_size_1522, &nsd->tx_size_1522);
1125 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1126 I40E_GLPRT_PTC9522L(hw->port),
1127 pf->stat_offsets_loaded,
1128 &osd->tx_size_big, &nsd->tx_size_big);
1129
1130 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1131 pf->stat_offsets_loaded,
1132 &osd->rx_undersize, &nsd->rx_undersize);
1133 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1134 pf->stat_offsets_loaded,
1135 &osd->rx_fragments, &nsd->rx_fragments);
1136 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1137 pf->stat_offsets_loaded,
1138 &osd->rx_oversize, &nsd->rx_oversize);
1139 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1140 pf->stat_offsets_loaded,
1141 &osd->rx_jabber, &nsd->rx_jabber);
1142
1143 /* FDIR stats */
1144 i40e_stat_update32(hw,
1145 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1146 pf->stat_offsets_loaded,
1147 &osd->fd_atr_match, &nsd->fd_atr_match);
1148 i40e_stat_update32(hw,
1149 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1150 pf->stat_offsets_loaded,
1151 &osd->fd_sb_match, &nsd->fd_sb_match);
1152 i40e_stat_update32(hw,
1153 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1154 pf->stat_offsets_loaded,
1155 &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1156
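	/* Energy Efficient Ethernet: capture the current Tx/Rx low power
	 * idle (LPI) status bits and the LPI event counters
	 */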
1157 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1158 nsd->tx_lpi_status =
1159 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1160 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1161 nsd->rx_lpi_status =
1162 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1163 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1164 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1165 pf->stat_offsets_loaded,
1166 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1167 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1168 pf->stat_offsets_loaded,
1169 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1170
1171 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1172 !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1173 nsd->fd_sb_status = true;
1174 else
1175 nsd->fd_sb_status = false;
1176
1177 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1178 !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1179 nsd->fd_atr_status = true;
1180 else
1181 nsd->fd_atr_status = false;
1182
1183 pf->stat_offsets_loaded = true;
1184 }
1185
1186 /**
1187 * i40e_update_stats - Update the various statistics counters.
1188 * @vsi: the VSI to be updated
1189 *
1190 * Update the various stats for this VSI and its related entities.
1191 **/
1192 void i40e_update_stats(struct i40e_vsi *vsi)
1193 {
1194 struct i40e_pf *pf = vsi->back;
1195
1196 if (vsi == pf->vsi[pf->lan_vsi])
1197 i40e_update_pf_stats(pf);
1198
1199 i40e_update_vsi_stats(vsi);
1200 #ifdef I40E_FCOE
1201 i40e_update_fcoe_stats(vsi);
1202 #endif
1203 }
1204
1205 /**
1206 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1207 * @vsi: the VSI to be searched
1208 * @macaddr: the MAC address
1209 * @vlan: the vlan
 1210  * @is_vf: make sure it's a VF filter, else doesn't matter
 1211  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1212 *
1213 * Returns ptr to the filter object or NULL
1214 **/
1215 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1216 u8 *macaddr, s16 vlan,
1217 bool is_vf, bool is_netdev)
1218 {
1219 struct i40e_mac_filter *f;
1220
1221 if (!vsi || !macaddr)
1222 return NULL;
1223
1224 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1225 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1226 (vlan == f->vlan) &&
1227 (!is_vf || f->is_vf) &&
1228 (!is_netdev || f->is_netdev))
1229 return f;
1230 }
1231 return NULL;
1232 }
1233
1234 /**
1235 * i40e_find_mac - Find a mac addr in the macvlan filters list
1236 * @vsi: the VSI to be searched
1237 * @macaddr: the MAC address we are searching for
 1238  * @is_vf: make sure it's a VF filter, else doesn't matter
 1239  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1240 *
1241 * Returns the first filter with the provided MAC address or NULL if
1242 * MAC address was not found
1243 **/
1244 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1245 bool is_vf, bool is_netdev)
1246 {
1247 struct i40e_mac_filter *f;
1248
1249 if (!vsi || !macaddr)
1250 return NULL;
1251
1252 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1253 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1254 (!is_vf || f->is_vf) &&
1255 (!is_netdev || f->is_netdev))
1256 return f;
1257 }
1258 return NULL;
1259 }
1260
1261 /**
1262 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1263 * @vsi: the VSI to be searched
1264 *
1265 * Returns true if VSI is in vlan mode or false otherwise
1266 **/
1267 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1268 {
1269 struct i40e_mac_filter *f;
1270
 1271 	/* Only a vlan of -1 on every filter denotes not being in VLAN mode,
 1272 	 * so we have to go through the whole list to be sure
 1273 	 */
1274 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1275 if (f->vlan >= 0 || vsi->info.pvid)
1276 return true;
1277 }
1278
1279 return false;
1280 }
1281
1282 /**
1283 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1284 * @vsi: the VSI to be searched
1285 * @macaddr: the mac address to be filtered
1286 * @is_vf: true if it is a VF
1287 * @is_netdev: true if it is a netdev
1288 *
1289 * Goes through all the macvlan filters and adds a
1290 * macvlan filter for each unique vlan that already exists
1291 *
1292 * Returns first filter found on success, else NULL
1293 **/
1294 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1295 bool is_vf, bool is_netdev)
1296 {
1297 struct i40e_mac_filter *f;
1298
1299 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1300 if (vsi->info.pvid)
1301 f->vlan = le16_to_cpu(vsi->info.pvid);
1302 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1303 is_vf, is_netdev)) {
1304 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1305 is_vf, is_netdev))
1306 return NULL;
1307 }
1308 }
1309
1310 return list_first_entry_or_null(&vsi->mac_filter_list,
1311 struct i40e_mac_filter, list);
1312 }
1313
1314 /**
1315 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1316 * @vsi: the PF Main VSI - inappropriate for any other VSI
1317 * @macaddr: the MAC address
1318 *
1319 * Some older firmware configurations set up a default promiscuous VLAN
1320 * filter that needs to be removed.
1321 **/
1322 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1323 {
1324 struct i40e_aqc_remove_macvlan_element_data element;
1325 struct i40e_pf *pf = vsi->back;
1326 i40e_status ret;
1327
1328 /* Only appropriate for the PF main VSI */
1329 if (vsi->type != I40E_VSI_MAIN)
1330 return -EINVAL;
1331
1332 memset(&element, 0, sizeof(element));
1333 ether_addr_copy(element.mac_addr, macaddr);
1334 element.vlan_tag = 0;
1335 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1336 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1337 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1338 if (ret)
1339 return -ENOENT;
1340
1341 return 0;
1342 }
1343
1344 /**
1345 * i40e_add_filter - Add a mac/vlan filter to the VSI
1346 * @vsi: the VSI to be searched
1347 * @macaddr: the MAC address
1348 * @vlan: the vlan
 1349  * @is_vf: make sure it's a VF filter, else doesn't matter
 1350  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1351 *
1352 * Returns ptr to the filter object or NULL when no memory available.
1353 **/
1354 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1355 u8 *macaddr, s16 vlan,
1356 bool is_vf, bool is_netdev)
1357 {
1358 struct i40e_mac_filter *f;
1359
1360 if (!vsi || !macaddr)
1361 return NULL;
1362
1363 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1364 if (!f) {
1365 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1366 if (!f)
1367 goto add_filter_out;
1368
1369 ether_addr_copy(f->macaddr, macaddr);
1370 f->vlan = vlan;
1371 f->changed = true;
1372
1373 INIT_LIST_HEAD(&f->list);
1374 list_add(&f->list, &vsi->mac_filter_list);
1375 }
1376
1377 /* increment counter and add a new flag if needed */
1378 if (is_vf) {
1379 if (!f->is_vf) {
1380 f->is_vf = true;
1381 f->counter++;
1382 }
1383 } else if (is_netdev) {
1384 if (!f->is_netdev) {
1385 f->is_netdev = true;
1386 f->counter++;
1387 }
1388 } else {
1389 f->counter++;
1390 }
1391
1392 /* changed tells sync_filters_subtask to
1393 * push the filter down to the firmware
1394 */
1395 if (f->changed) {
1396 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1397 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1398 }
1399
1400 add_filter_out:
1401 return f;
1402 }
1403
1404 /**
1405 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1406 * @vsi: the VSI to be searched
1407 * @macaddr: the MAC address
1408 * @vlan: the vlan
1409 * @is_vf: make sure it's a VF filter, else doesn't matter
1410 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1411 **/
1412 void i40e_del_filter(struct i40e_vsi *vsi,
1413 u8 *macaddr, s16 vlan,
1414 bool is_vf, bool is_netdev)
1415 {
1416 struct i40e_mac_filter *f;
1417
1418 if (!vsi || !macaddr)
1419 return;
1420
1421 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1422 if (!f || f->counter == 0)
1423 return;
1424
1425 if (is_vf) {
1426 if (f->is_vf) {
1427 f->is_vf = false;
1428 f->counter--;
1429 }
1430 } else if (is_netdev) {
1431 if (f->is_netdev) {
1432 f->is_netdev = false;
1433 f->counter--;
1434 }
1435 } else {
1436 /* make sure we don't remove a filter in use by VF or netdev */
1437 int min_f = 0;
1438 min_f += (f->is_vf ? 1 : 0);
1439 min_f += (f->is_netdev ? 1 : 0);
1440
1441 if (f->counter > min_f)
1442 f->counter--;
1443 }
1444
1445 /* counter == 0 tells sync_filters_subtask to
1446 * remove the filter from the firmware's list
1447 */
1448 if (f->counter == 0) {
1449 f->changed = true;
1450 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1451 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1452 }
1453 }
1454
1455 /**
1456 * i40e_set_mac - NDO callback to set mac address
1457 * @netdev: network interface device structure
1458 * @p: pointer to an address structure
1459 *
1460 * Returns 0 on success, negative on failure
1461 **/
1462 #ifdef I40E_FCOE
1463 int i40e_set_mac(struct net_device *netdev, void *p)
1464 #else
1465 static int i40e_set_mac(struct net_device *netdev, void *p)
1466 #endif
1467 {
1468 struct i40e_netdev_priv *np = netdev_priv(netdev);
1469 struct i40e_vsi *vsi = np->vsi;
1470 struct i40e_pf *pf = vsi->back;
1471 struct i40e_hw *hw = &pf->hw;
1472 struct sockaddr *addr = p;
1473 struct i40e_mac_filter *f;
1474
1475 if (!is_valid_ether_addr(addr->sa_data))
1476 return -EADDRNOTAVAIL;
1477
1478 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1479 netdev_info(netdev, "already using mac address %pM\n",
1480 addr->sa_data);
1481 return 0;
1482 }
1483
1484 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1485 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1486 return -EADDRNOTAVAIL;
1487
1488 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1489 netdev_info(netdev, "returning to hw mac address %pM\n",
1490 hw->mac.addr);
1491 else
1492 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1493
1494 if (vsi->type == I40E_VSI_MAIN) {
1495 i40e_status ret;
1496 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1497 I40E_AQC_WRITE_TYPE_LAA_WOL,
1498 addr->sa_data, NULL);
1499 if (ret) {
1500 netdev_info(netdev,
1501 "Addr change for Main VSI failed: %d\n",
1502 ret);
1503 return -EADDRNOTAVAIL;
1504 }
1505 }
1506
1507 if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1508 struct i40e_aqc_remove_macvlan_element_data element;
1509
1510 memset(&element, 0, sizeof(element));
1511 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1512 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1513 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1514 } else {
1515 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1516 false, false);
1517 }
1518
1519 if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1520 struct i40e_aqc_add_macvlan_element_data element;
1521
1522 memset(&element, 0, sizeof(element));
1523 ether_addr_copy(element.mac_addr, hw->mac.addr);
1524 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1525 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1526 } else {
1527 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1528 false, false);
1529 if (f)
1530 f->is_laa = true;
1531 }
1532
1533 i40e_sync_vsi_filters(vsi, false);
1534 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1535
1536 return 0;
1537 }
1538
1539 /**
1540 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1541 * @vsi: the VSI being setup
1542 * @ctxt: VSI context structure
1543 * @enabled_tc: Enabled TCs bitmap
1544 * @is_add: True if called before Add VSI
1545 *
1546 * Setup VSI queue mapping for enabled traffic classes.
1547 **/
1548 #ifdef I40E_FCOE
1549 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1550 struct i40e_vsi_context *ctxt,
1551 u8 enabled_tc,
1552 bool is_add)
1553 #else
1554 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1555 struct i40e_vsi_context *ctxt,
1556 u8 enabled_tc,
1557 bool is_add)
1558 #endif
1559 {
1560 struct i40e_pf *pf = vsi->back;
1561 u16 sections = 0;
1562 u8 netdev_tc = 0;
1563 u16 numtc = 0;
1564 u16 qcount;
1565 u8 offset;
1566 u16 qmap;
1567 int i;
1568 u16 num_tc_qps = 0;
1569
1570 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1571 offset = 0;
1572
1573 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1574 /* Find numtc from enabled TC bitmap */
1575 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1576 if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
1577 numtc++;
1578 }
1579 if (!numtc) {
1580 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1581 numtc = 1;
1582 }
1583 } else {
 1584 		/* At least TC0 is enabled in the non-DCB case */
1585 numtc = 1;
1586 }
1587
1588 vsi->tc_config.numtc = numtc;
1589 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1590 /* Number of queues per enabled TC */
 1591 	/* In the MFP case we can have a much lower count of MSI-X
 1592 	 * vectors available, so we need to lower the queue count
 1593 	 * actually used.
 1594 	 */
1595 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1596 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1597 else
1598 qcount = vsi->alloc_queue_pairs;
1599 num_tc_qps = qcount / numtc;
1600 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1601
1602 /* Setup queue offset/count for all TCs for given VSI */
1603 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1604 /* See if the given TC is enabled for the given VSI */
1605 if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
1606 /* TC is enabled */
1607 int pow, num_qps;
1608
1609 switch (vsi->type) {
1610 case I40E_VSI_MAIN:
1611 qcount = min_t(int, pf->rss_size, num_tc_qps);
1612 break;
1613 #ifdef I40E_FCOE
1614 case I40E_VSI_FCOE:
1615 qcount = num_tc_qps;
1616 break;
1617 #endif
1618 case I40E_VSI_FDIR:
1619 case I40E_VSI_SRIOV:
1620 case I40E_VSI_VMDQ2:
1621 default:
1622 qcount = num_tc_qps;
1623 WARN_ON(i != 0);
1624 break;
1625 }
1626 vsi->tc_config.tc_info[i].qoffset = offset;
1627 vsi->tc_config.tc_info[i].qcount = qcount;
1628
1629 /* find the next higher power-of-2 of num queue pairs */
1630 num_qps = qcount;
1631 pow = 0;
1632 while (num_qps && (BIT_ULL(pow) < qcount)) {
1633 pow++;
1634 num_qps >>= 1;
1635 }
1636
1637 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
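			/* qmap packs this TC's first queue into the offset
			 * field and the power-of-two exponent computed above
			 * into the queue count field
			 */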
1638 qmap =
1639 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1640 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1641
1642 offset += qcount;
1643 } else {
1644 /* TC is not enabled so set the offset to
1645 * default queue and allocate one queue
1646 * for the given TC.
1647 */
1648 vsi->tc_config.tc_info[i].qoffset = 0;
1649 vsi->tc_config.tc_info[i].qcount = 1;
1650 vsi->tc_config.tc_info[i].netdev_tc = 0;
1651
1652 qmap = 0;
1653 }
1654 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1655 }
1656
1657 /* Set actual Tx/Rx queue pairs */
1658 vsi->num_queue_pairs = offset;
1659 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1660 if (vsi->req_queue_pairs > 0)
1661 vsi->num_queue_pairs = vsi->req_queue_pairs;
1662 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1663 vsi->num_queue_pairs = pf->num_lan_msix;
1664 }
1665
1666 /* Scheduler section valid can only be set for ADD VSI */
1667 if (is_add) {
1668 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1669
1670 ctxt->info.up_enable_bits = enabled_tc;
1671 }
1672 if (vsi->type == I40E_VSI_SRIOV) {
1673 ctxt->info.mapping_flags |=
1674 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1675 for (i = 0; i < vsi->num_queue_pairs; i++)
1676 ctxt->info.queue_mapping[i] =
1677 cpu_to_le16(vsi->base_queue + i);
1678 } else {
1679 ctxt->info.mapping_flags |=
1680 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1681 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1682 }
1683 ctxt->info.valid_sections |= cpu_to_le16(sections);
1684 }
1685
1686 /**
1687 * i40e_set_rx_mode - NDO callback to set the netdev filters
1688 * @netdev: network interface device structure
1689 **/
1690 #ifdef I40E_FCOE
1691 void i40e_set_rx_mode(struct net_device *netdev)
1692 #else
1693 static void i40e_set_rx_mode(struct net_device *netdev)
1694 #endif
1695 {
1696 struct i40e_netdev_priv *np = netdev_priv(netdev);
1697 struct i40e_mac_filter *f, *ftmp;
1698 struct i40e_vsi *vsi = np->vsi;
1699 struct netdev_hw_addr *uca;
1700 struct netdev_hw_addr *mca;
1701 struct netdev_hw_addr *ha;
1702
1703 /* add addr if not already in the filter list */
1704 netdev_for_each_uc_addr(uca, netdev) {
1705 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1706 if (i40e_is_vsi_in_vlan(vsi))
1707 i40e_put_mac_in_vlan(vsi, uca->addr,
1708 false, true);
1709 else
1710 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1711 false, true);
1712 }
1713 }
1714
1715 netdev_for_each_mc_addr(mca, netdev) {
1716 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1717 if (i40e_is_vsi_in_vlan(vsi))
1718 i40e_put_mac_in_vlan(vsi, mca->addr,
1719 false, true);
1720 else
1721 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1722 false, true);
1723 }
1724 }
1725
1726 /* remove filter if not in netdev list */
1727 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1728 bool found = false;
1729
1730 if (!f->is_netdev)
1731 continue;
1732
1733 if (is_multicast_ether_addr(f->macaddr)) {
1734 netdev_for_each_mc_addr(mca, netdev) {
1735 if (ether_addr_equal(mca->addr, f->macaddr)) {
1736 found = true;
1737 break;
1738 }
1739 }
1740 } else {
1741 netdev_for_each_uc_addr(uca, netdev) {
1742 if (ether_addr_equal(uca->addr, f->macaddr)) {
1743 found = true;
1744 break;
1745 }
1746 }
1747
1748 for_each_dev_addr(netdev, ha) {
1749 if (ether_addr_equal(ha->addr, f->macaddr)) {
1750 found = true;
1751 break;
1752 }
1753 }
1754 }
1755 if (!found)
1756 i40e_del_filter(
1757 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1758 }
1759
1760 /* check for other flag changes */
1761 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1762 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1763 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1764 }
1765 }
1766
1767 /**
1768 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1769 * @vsi: ptr to the VSI
1770 * @grab_rtnl: whether RTNL needs to be grabbed
1771 *
1772 * Push any outstanding VSI filter changes through the AdminQ.
1773 *
1774 * Returns 0 or error value
1775 **/
1776 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
1777 {
1778 struct i40e_mac_filter *f, *ftmp;
1779 bool promisc_forced_on = false;
1780 bool add_happened = false;
1781 int filter_list_len = 0;
1782 u32 changed_flags = 0;
1783 i40e_status ret = 0;
1784 struct i40e_pf *pf;
1785 int num_add = 0;
1786 int num_del = 0;
1787 int aq_err = 0;
1788 u16 cmd_flags;
1789
 1790 	/* pointers for the add/del filter element arrays, kcalloc'd below */
1791 struct i40e_aqc_add_macvlan_element_data *add_list;
1792 struct i40e_aqc_remove_macvlan_element_data *del_list;
1793
1794 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1795 usleep_range(1000, 2000);
1796 pf = vsi->back;
1797
1798 if (vsi->netdev) {
1799 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1800 vsi->current_netdev_flags = vsi->netdev->flags;
1801 }
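	/* changed_flags now holds only the netdev flag bits that toggled
	 * since the last sync; it is checked below for IFF_ALLMULTI and
	 * IFF_PROMISC transitions.
	 */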
1802
1803 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1804 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1805
1806 filter_list_len = pf->hw.aq.asq_buf_size /
1807 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1808 del_list = kcalloc(filter_list_len,
1809 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1810 GFP_KERNEL);
1811 if (!del_list)
1812 return -ENOMEM;
1813
1814 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1815 if (!f->changed)
1816 continue;
1817
1818 if (f->counter != 0)
1819 continue;
1820 f->changed = false;
1821 cmd_flags = 0;
1822
1823 /* add to delete list */
1824 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1825 del_list[num_del].vlan_tag =
1826 cpu_to_le16((u16)(f->vlan ==
1827 I40E_VLAN_ANY ? 0 : f->vlan));
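	/* filters tracked as I40E_VLAN_ANY (-1) are queued for deletion
	 * with a VLAN tag of 0
	 */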
1828
1829 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1830 del_list[num_del].flags = cmd_flags;
1831 num_del++;
1832
1833 /* unlink from filter list */
1834 list_del(&f->list);
1835 kfree(f);
1836
1837 /* flush a full buffer */
1838 if (num_del == filter_list_len) {
1839 ret = i40e_aq_remove_macvlan(&pf->hw,
1840 vsi->seid, del_list, num_del,
1841 NULL);
1842 aq_err = pf->hw.aq.asq_last_status;
1843 num_del = 0;
1844 memset(del_list, 0, sizeof(*del_list));
1845
1846 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1847 dev_info(&pf->pdev->dev,
1848 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1849 i40e_stat_str(&pf->hw, ret),
1850 i40e_aq_str(&pf->hw, aq_err));
1851 }
1852 }
1853 if (num_del) {
1854 ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1855 del_list, num_del, NULL);
1856 aq_err = pf->hw.aq.asq_last_status;
1857 num_del = 0;
1858
1859 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1860 dev_info(&pf->pdev->dev,
1861 "ignoring delete macvlan error, err %s aq_err %s\n",
1862 i40e_stat_str(&pf->hw, ret),
1863 i40e_aq_str(&pf->hw, aq_err));
1864 }
1865
1866 kfree(del_list);
1867 del_list = NULL;
1868
1869 /* do all the adds now */
1870 filter_list_len = pf->hw.aq.asq_buf_size /
1871 sizeof(struct i40e_aqc_add_macvlan_element_data);
1872 add_list = kcalloc(filter_list_len,
1873 sizeof(struct i40e_aqc_add_macvlan_element_data),
1874 GFP_KERNEL);
1875 if (!add_list)
1876 return -ENOMEM;
1877
1878 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1879 if (!f->changed)
1880 continue;
1881
1882 if (f->counter == 0)
1883 continue;
1884 f->changed = false;
1885 add_happened = true;
1886 cmd_flags = 0;
1887
1888 /* add to add array */
1889 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1890 add_list[num_add].vlan_tag =
1891 cpu_to_le16(
1892 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1893 add_list[num_add].queue_number = 0;
1894
1895 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1896 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1897 num_add++;
1898
1899 /* flush a full buffer */
1900 if (num_add == filter_list_len) {
1901 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1902 add_list, num_add,
1903 NULL);
1904 aq_err = pf->hw.aq.asq_last_status;
1905 num_add = 0;
1906
1907 if (ret)
1908 break;
1909 memset(add_list, 0, sizeof(*add_list));
1910 }
1911 }
1912 if (num_add) {
1913 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1914 add_list, num_add, NULL);
1915 aq_err = pf->hw.aq.asq_last_status;
1916 num_add = 0;
1917 }
1918 kfree(add_list);
1919 add_list = NULL;
1920
1921 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
1922 dev_info(&pf->pdev->dev,
1923 "add filter failed, err %s aq_err %s\n",
1924 i40e_stat_str(&pf->hw, ret),
1925 i40e_aq_str(&pf->hw, aq_err));
1926 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1927 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1928 &vsi->state)) {
1929 promisc_forced_on = true;
1930 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1931 &vsi->state);
1932 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1933 }
1934 }
1935 }
1936
1937 /* check for changes in promiscuous modes */
1938 if (changed_flags & IFF_ALLMULTI) {
1939 bool cur_multipromisc;
1940 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1941 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1942 vsi->seid,
1943 cur_multipromisc,
1944 NULL);
1945 if (ret)
1946 dev_info(&pf->pdev->dev,
1947 "set multi promisc failed, err %s aq_err %s\n",
1948 i40e_stat_str(&pf->hw, ret),
1949 i40e_aq_str(&pf->hw,
1950 pf->hw.aq.asq_last_status));
1951 }
1952 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1953 bool cur_promisc;
1954 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1955 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1956 &vsi->state));
1957 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
1958 /* set defport ON for the Main VSI instead of true promisc;
1959 * this way we will get all unicast/multicast and VLAN
1960 * promisc behavior but will not get VF or VMDq traffic
1961 * replicated on the Main VSI.
1962 */
1963 if (pf->cur_promisc != cur_promisc) {
1964 pf->cur_promisc = cur_promisc;
1965 if (grab_rtnl)
1966 i40e_do_reset_safe(pf,
1967 BIT(__I40E_PF_RESET_REQUESTED));
1968 else
1969 i40e_do_reset(pf,
1970 BIT(__I40E_PF_RESET_REQUESTED));
1971 }
1972 } else {
1973 ret = i40e_aq_set_vsi_unicast_promiscuous(
1974 &vsi->back->hw,
1975 vsi->seid,
1976 cur_promisc, NULL);
1977 if (ret)
1978 dev_info(&pf->pdev->dev,
1979 "set unicast promisc failed, err %d, aq_err %d\n",
1980 ret, pf->hw.aq.asq_last_status);
1981 ret = i40e_aq_set_vsi_multicast_promiscuous(
1982 &vsi->back->hw,
1983 vsi->seid,
1984 cur_promisc, NULL);
1985 if (ret)
1986 dev_info(&pf->pdev->dev,
1987 "set multicast promisc failed, err %d, aq_err %d\n",
1988 ret, pf->hw.aq.asq_last_status);
1989 }
1990 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1991 vsi->seid,
1992 cur_promisc, NULL);
1993 if (ret)
1994 dev_info(&pf->pdev->dev,
1995 "set brdcast promisc failed, err %s, aq_err %s\n",
1996 i40e_stat_str(&pf->hw, ret),
1997 i40e_aq_str(&pf->hw,
1998 pf->hw.aq.asq_last_status));
1999 }
2000
2001 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2002 return 0;
2003 }
2004
2005 /**
2006 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2007 * @pf: board private structure
2008 **/
2009 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2010 {
2011 int v;
2012
2013 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2014 return;
2015 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2016
2017 for (v = 0; v < pf->num_alloc_vsi; v++) {
2018 if (pf->vsi[v] &&
2019 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
2020 i40e_sync_vsi_filters(pf->vsi[v], true);
2021 }
2022 }
2023
2024 /**
2025 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2026 * @netdev: network interface device structure
2027 * @new_mtu: new value for maximum frame size
2028 *
2029 * Returns 0 on success, negative on failure
2030 **/
2031 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2032 {
2033 struct i40e_netdev_priv *np = netdev_priv(netdev);
2034 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2035 struct i40e_vsi *vsi = np->vsi;
2036
2037 /* MTU < 68 is an error and causes problems on some kernels */
2038 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2039 return -EINVAL;
2040
2041 netdev_info(netdev, "changing MTU from %d to %d\n",
2042 netdev->mtu, new_mtu);
2043 netdev->mtu = new_mtu;
2044 if (netif_running(netdev))
2045 i40e_vsi_reinit_locked(vsi);
2046
2047 return 0;
2048 }
2049
2050 /**
2051 * i40e_ioctl - Access the hwtstamp interface
2052 * @netdev: network interface device structure
2053 * @ifr: interface request data
2054 * @cmd: ioctl command
2055 **/
2056 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2057 {
2058 struct i40e_netdev_priv *np = netdev_priv(netdev);
2059 struct i40e_pf *pf = np->vsi->back;
2060
2061 switch (cmd) {
2062 case SIOCGHWTSTAMP:
2063 return i40e_ptp_get_ts_config(pf, ifr);
2064 case SIOCSHWTSTAMP:
2065 return i40e_ptp_set_ts_config(pf, ifr);
2066 default:
2067 return -EOPNOTSUPP;
2068 }
2069 }
2070
2071 /**
2072 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2073 * @vsi: the vsi being adjusted
2074 **/
2075 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2076 {
2077 struct i40e_vsi_context ctxt;
2078 i40e_status ret;
2079
2080 if ((vsi->info.valid_sections &
2081 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2082 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2083 return; /* already enabled */
2084
2085 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2086 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2087 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2088
2089 ctxt.seid = vsi->seid;
2090 ctxt.info = vsi->info;
2091 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2092 if (ret) {
2093 dev_info(&vsi->back->pdev->dev,
2094 "update vlan stripping failed, err %s aq_err %s\n",
2095 i40e_stat_str(&vsi->back->hw, ret),
2096 i40e_aq_str(&vsi->back->hw,
2097 vsi->back->hw.aq.asq_last_status));
2098 }
2099 }
2100
2101 /**
2102 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2103 * @vsi: the vsi being adjusted
2104 **/
2105 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2106 {
2107 struct i40e_vsi_context ctxt;
2108 i40e_status ret;
2109
2110 if ((vsi->info.valid_sections &
2111 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2112 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2113 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2114 return; /* already disabled */
2115
2116 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2117 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2118 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2119
2120 ctxt.seid = vsi->seid;
2121 ctxt.info = vsi->info;
2122 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2123 if (ret) {
2124 dev_info(&vsi->back->pdev->dev,
2125 "update vlan stripping failed, err %s aq_err %s\n",
2126 i40e_stat_str(&vsi->back->hw, ret),
2127 i40e_aq_str(&vsi->back->hw,
2128 vsi->back->hw.aq.asq_last_status));
2129 }
2130 }
2131
2132 /**
2133 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2134 * @netdev: network interface to be adjusted
2135 * @features: netdev features to test if VLAN offload is enabled or not
2136 **/
2137 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2138 {
2139 struct i40e_netdev_priv *np = netdev_priv(netdev);
2140 struct i40e_vsi *vsi = np->vsi;
2141
2142 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2143 i40e_vlan_stripping_enable(vsi);
2144 else
2145 i40e_vlan_stripping_disable(vsi);
2146 }
2147
2148 /**
2149 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2150 * @vsi: the vsi being configured
2151 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2152 **/
2153 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2154 {
2155 struct i40e_mac_filter *f, *add_f;
2156 bool is_netdev, is_vf;
2157
2158 is_vf = (vsi->type == I40E_VSI_SRIOV);
2159 is_netdev = !!(vsi->netdev);
2160
2161 if (is_netdev) {
2162 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2163 is_vf, is_netdev);
2164 if (!add_f) {
2165 dev_info(&vsi->back->pdev->dev,
2166 "Could not add vlan filter %d for %pM\n",
2167 vid, vsi->netdev->dev_addr);
2168 return -ENOMEM;
2169 }
2170 }
2171
2172 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2173 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2174 if (!add_f) {
2175 dev_info(&vsi->back->pdev->dev,
2176 "Could not add vlan filter %d for %pM\n",
2177 vid, f->macaddr);
2178 return -ENOMEM;
2179 }
2180 }
2181
2182 /* Now that we are adding a vlan tag, check whether this is the
2183 * first real tag (i.e. an I40E_VLAN_ANY "-1" filter exists) and if
2184 * so replace the -1 "tag" with 0, so we accept untagged and the
2185 * specified tagged traffic (and not any tagged and untagged)
2186 */
2187 if (vid > 0) {
2188 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2189 I40E_VLAN_ANY,
2190 is_vf, is_netdev)) {
2191 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2192 I40E_VLAN_ANY, is_vf, is_netdev);
2193 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2194 is_vf, is_netdev);
2195 if (!add_f) {
2196 dev_info(&vsi->back->pdev->dev,
2197 "Could not add filter 0 for %pM\n",
2198 vsi->netdev->dev_addr);
2199 return -ENOMEM;
2200 }
2201 }
2202 }
2203
2204 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2205 if (vid > 0 && !vsi->info.pvid) {
2206 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2207 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2208 is_vf, is_netdev)) {
2209 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2210 is_vf, is_netdev);
2211 add_f = i40e_add_filter(vsi, f->macaddr,
2212 0, is_vf, is_netdev);
2213 if (!add_f) {
2214 dev_info(&vsi->back->pdev->dev,
2215 "Could not add filter 0 for %pM\n",
2216 f->macaddr);
2217 return -ENOMEM;
2218 }
2219 }
2220 }
2221 }
2222
2223 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2224 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2225 return 0;
2226
2227 return i40e_sync_vsi_filters(vsi, false);
2228 }
2229
2230 /**
2231 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2232 * @vsi: the vsi being configured
2233 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2234 *
2235 * Return: 0 on success or negative otherwise
2236 **/
2237 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2238 {
2239 struct net_device *netdev = vsi->netdev;
2240 struct i40e_mac_filter *f, *add_f;
2241 bool is_vf, is_netdev;
2242 int filter_count = 0;
2243
2244 is_vf = (vsi->type == I40E_VSI_SRIOV);
2245 is_netdev = !!(netdev);
2246
2247 if (is_netdev)
2248 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2249
2250 list_for_each_entry(f, &vsi->mac_filter_list, list)
2251 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2252
2253 /* go through all the filters for this VSI and if there is only
2254 * vid == 0 it means there are no other filters, so vid 0 must
2255 * be replaced with -1. This signifies that we should from now
2256 * on accept any traffic (with any tag present, or untagged)
2257 */
2258 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2259 if (is_netdev) {
2260 if (f->vlan &&
2261 ether_addr_equal(netdev->dev_addr, f->macaddr))
2262 filter_count++;
2263 }
2264
2265 if (f->vlan)
2266 filter_count++;
2267 }
2268
2269 if (!filter_count && is_netdev) {
2270 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2271 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2272 is_vf, is_netdev);
2273 if (!f) {
2274 dev_info(&vsi->back->pdev->dev,
2275 "Could not add filter %d for %pM\n",
2276 I40E_VLAN_ANY, netdev->dev_addr);
2277 return -ENOMEM;
2278 }
2279 }
2280
2281 if (!filter_count) {
2282 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2283 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2284 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2285 is_vf, is_netdev);
2286 if (!add_f) {
2287 dev_info(&vsi->back->pdev->dev,
2288 "Could not add filter %d for %pM\n",
2289 I40E_VLAN_ANY, f->macaddr);
2290 return -ENOMEM;
2291 }
2292 }
2293 }
2294
2295 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2296 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2297 return 0;
2298
2299 return i40e_sync_vsi_filters(vsi, false);
2300 }
2301
2302 /**
2303 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2304 * @netdev: network interface to be adjusted
2305 * @vid: vlan id to be added
2306 *
2307 * net_device_ops implementation for adding vlan ids
2308 **/
2309 #ifdef I40E_FCOE
2310 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2311 __always_unused __be16 proto, u16 vid)
2312 #else
2313 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2314 __always_unused __be16 proto, u16 vid)
2315 #endif
2316 {
2317 struct i40e_netdev_priv *np = netdev_priv(netdev);
2318 struct i40e_vsi *vsi = np->vsi;
2319 int ret = 0;
2320
2321 if (vid > 4095)
2322 return -EINVAL;
2323
2324 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2325
2326 /* If the network stack called us with vid = 0 then
2327 * it is asking to receive priority tagged packets with
2328 * vlan id 0. Our HW receives them by default when configured
2329 * to receive untagged packets so there is no need to add an
2330 * extra filter for vlan 0 tagged packets.
2331 */
2332 if (vid)
2333 ret = i40e_vsi_add_vlan(vsi, vid);
2334
2335 if (!ret && (vid < VLAN_N_VID))
2336 set_bit(vid, vsi->active_vlans);
2337
2338 return ret;
2339 }
2340
2341 /**
2342 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2343 * @netdev: network interface to be adjusted
2344 * @vid: vlan id to be removed
2345 *
2346 * net_device_ops implementation for removing vlan ids
2347 **/
2348 #ifdef I40E_FCOE
2349 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2350 __always_unused __be16 proto, u16 vid)
2351 #else
2352 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2353 __always_unused __be16 proto, u16 vid)
2354 #endif
2355 {
2356 struct i40e_netdev_priv *np = netdev_priv(netdev);
2357 struct i40e_vsi *vsi = np->vsi;
2358
2359 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2360
2361 /* return code is ignored as there is nothing a user
2362 * can do about failure to remove, and a log message was
2363 * already printed by i40e_vsi_kill_vlan()
2364 */
2365 i40e_vsi_kill_vlan(vsi, vid);
2366
2367 clear_bit(vid, vsi->active_vlans);
2368
2369 return 0;
2370 }
2371
2372 /**
2373 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2374 * @vsi: the vsi being brought back up
2375 **/
2376 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2377 {
2378 u16 vid;
2379
2380 if (!vsi->netdev)
2381 return;
2382
2383 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2384
2385 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2386 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2387 vid);
2388 }
2389
2390 /**
2391 * i40e_vsi_add_pvid - Add pvid for the VSI
2392 * @vsi: the vsi being adjusted
2393 * @vid: the vlan id to set as a PVID
2394 **/
2395 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2396 {
2397 struct i40e_vsi_context ctxt;
2398 i40e_status ret;
2399
2400 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2401 vsi->info.pvid = cpu_to_le16(vid);
2402 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2403 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2404 I40E_AQ_VSI_PVLAN_EMOD_STR;
2405
2406 ctxt.seid = vsi->seid;
2407 ctxt.info = vsi->info;
2408 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2409 if (ret) {
2410 dev_info(&vsi->back->pdev->dev,
2411 "add pvid failed, err %s aq_err %s\n",
2412 i40e_stat_str(&vsi->back->hw, ret),
2413 i40e_aq_str(&vsi->back->hw,
2414 vsi->back->hw.aq.asq_last_status));
2415 return -ENOENT;
2416 }
2417
2418 return 0;
2419 }
2420
2421 /**
2422 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2423 * @vsi: the vsi being adjusted
2424 *
2425 * Just use i40e_vlan_stripping_disable() to put it back to normal
2426 **/
2427 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2428 {
2429 i40e_vlan_stripping_disable(vsi);
2430
2431 vsi->info.pvid = 0;
2432 }
2433
2434 /**
2435 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2436 * @vsi: ptr to the VSI
2437 *
2438 * If this function returns with an error, then it's possible one or
2439 * more of the rings is populated (while the rest are not). It is the
2440 * caller's duty to clean those orphaned rings.
2441 *
2442 * Return 0 on success, negative on failure
2443 **/
2444 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2445 {
2446 int i, err = 0;
2447
2448 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2449 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2450
2451 return err;
2452 }
2453
2454 /**
2455 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2456 * @vsi: ptr to the VSI
2457 *
2458 * Free VSI's transmit software resources
2459 **/
2460 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2461 {
2462 int i;
2463
2464 if (!vsi->tx_rings)
2465 return;
2466
2467 for (i = 0; i < vsi->num_queue_pairs; i++)
2468 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2469 i40e_free_tx_resources(vsi->tx_rings[i]);
2470 }
2471
2472 /**
2473 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2474 * @vsi: ptr to the VSI
2475 *
2476 * If this function returns with an error, then it's possible one or
2477 * more of the rings is populated (while the rest are not). It is the
2478 * caller's duty to clean those orphaned rings.
2479 *
2480 * Return 0 on success, negative on failure
2481 **/
2482 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2483 {
2484 int i, err = 0;
2485
2486 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2487 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2488 #ifdef I40E_FCOE
2489 i40e_fcoe_setup_ddp_resources(vsi);
2490 #endif
2491 return err;
2492 }
2493
2494 /**
2495 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2496 * @vsi: ptr to the VSI
2497 *
2498 * Free all receive software resources
2499 **/
2500 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2501 {
2502 int i;
2503
2504 if (!vsi->rx_rings)
2505 return;
2506
2507 for (i = 0; i < vsi->num_queue_pairs; i++)
2508 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2509 i40e_free_rx_resources(vsi->rx_rings[i]);
2510 #ifdef I40E_FCOE
2511 i40e_fcoe_free_ddp_resources(vsi);
2512 #endif
2513 }
2514
2515 /**
2516 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2517 * @ring: The Tx ring to configure
2518 *
2519 * This enables/disables XPS for a given Tx descriptor ring
2520 * based on the TCs enabled for the VSI that ring belongs to.
2521 **/
2522 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2523 {
2524 struct i40e_vsi *vsi = ring->vsi;
2525 cpumask_var_t mask;
2526
2527 if (!ring->q_vector || !ring->netdev)
2528 return;
2529
2530 /* Single TC mode enable XPS */
2531 if (vsi->tc_config.numtc <= 1) {
2532 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2533 netif_set_xps_queue(ring->netdev,
2534 &ring->q_vector->affinity_mask,
2535 ring->queue_index);
2536 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2537 /* Disable XPS to allow selection based on TC */
2538 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2539 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2540 free_cpumask_var(mask);
2541 }
2542 }
2543
2544 /**
2545 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the ring setup
2546 * @ring: The Tx ring to configure
2547 *
2548 * Configure the Tx descriptor ring in the HMC context.
2549 **/
2550 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2551 {
2552 struct i40e_vsi *vsi = ring->vsi;
2553 u16 pf_q = vsi->base_queue + ring->queue_index;
2554 struct i40e_hw *hw = &vsi->back->hw;
2555 struct i40e_hmc_obj_txq tx_ctx;
2556 i40e_status err = 0;
2557 u32 qtx_ctl = 0;
2558
2559 /* some ATR related tx ring init */
2560 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2561 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2562 ring->atr_count = 0;
2563 } else {
2564 ring->atr_sample_rate = 0;
2565 }
2566
2567 /* configure XPS */
2568 i40e_config_xps_tx_ring(ring);
2569
2570 /* clear the context structure first */
2571 memset(&tx_ctx, 0, sizeof(tx_ctx));
2572
2573 tx_ctx.new_context = 1;
2574 tx_ctx.base = (ring->dma / 128);
2575 tx_ctx.qlen = ring->count;
2576 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2577 I40E_FLAG_FD_ATR_ENABLED));
2578 #ifdef I40E_FCOE
2579 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2580 #endif
2581 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2582 /* FDIR VSI tx ring can still use RS bit and writebacks */
2583 if (vsi->type != I40E_VSI_FDIR)
2584 tx_ctx.head_wb_ena = 1;
2585 tx_ctx.head_wb_addr = ring->dma +
2586 (ring->count * sizeof(struct i40e_tx_desc));
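	/* the head writeback address points just past the last Tx
	 * descriptor in the ring's DMA area
	 */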
2587
2588 /* As part of VSI creation/update, FW allocates certain
2589 * Tx arbitration queue sets for each TC enabled for
2590 * the VSI. The FW returns the handles to these queue
2591 * sets as part of the response buffer to Add VSI,
2592 * Update VSI, etc. AQ commands. It is expected that
2593 * these queue set handles be associated with the Tx
2594 * queues by the driver as part of the TX queue context
2595 * initialization. This has to be done regardless of
2596 * DCB as by default everything is mapped to TC0.
2597 */
2598 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2599 tx_ctx.rdylist_act = 0;
2600
2601 /* clear the context in the HMC */
2602 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2603 if (err) {
2604 dev_info(&vsi->back->pdev->dev,
2605 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2606 ring->queue_index, pf_q, err);
2607 return -ENOMEM;
2608 }
2609
2610 /* set the context in the HMC */
2611 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2612 if (err) {
2613 dev_info(&vsi->back->pdev->dev,
2614 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2615 ring->queue_index, pf_q, err);
2616 return -ENOMEM;
2617 }
2618
2619 /* Now associate this queue with this PCI function */
2620 if (vsi->type == I40E_VSI_VMDQ2) {
2621 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2622 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2623 I40E_QTX_CTL_VFVM_INDX_MASK;
2624 } else {
2625 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2626 }
2627
2628 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2629 I40E_QTX_CTL_PF_INDX_MASK);
2630 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2631 i40e_flush(hw);
2632
2633 /* cache tail off for easier writes later */
2634 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2635
2636 return 0;
2637 }
2638
2639 /**
2640 * i40e_configure_rx_ring - Configure a receive ring context
2641 * @ring: The Rx ring to configure
2642 *
2643 * Configure the Rx descriptor ring in the HMC context.
2644 **/
2645 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2646 {
2647 struct i40e_vsi *vsi = ring->vsi;
2648 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2649 u16 pf_q = vsi->base_queue + ring->queue_index;
2650 struct i40e_hw *hw = &vsi->back->hw;
2651 struct i40e_hmc_obj_rxq rx_ctx;
2652 i40e_status err = 0;
2653
2654 ring->state = 0;
2655
2656 /* clear the context structure first */
2657 memset(&rx_ctx, 0, sizeof(rx_ctx));
2658
2659 ring->rx_buf_len = vsi->rx_buf_len;
2660 ring->rx_hdr_len = vsi->rx_hdr_len;
2661
2662 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2663 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2664
2665 rx_ctx.base = (ring->dma / 128);
2666 rx_ctx.qlen = ring->count;
2667
2668 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2669 set_ring_16byte_desc_enabled(ring);
2670 rx_ctx.dsize = 0;
2671 } else {
2672 rx_ctx.dsize = 1;
2673 }
2674
2675 rx_ctx.dtype = vsi->dtype;
2676 if (vsi->dtype) {
2677 set_ring_ps_enabled(ring);
2678 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2679 I40E_RX_SPLIT_IP |
2680 I40E_RX_SPLIT_TCP_UDP |
2681 I40E_RX_SPLIT_SCTP;
2682 } else {
2683 rx_ctx.hsplit_0 = 0;
2684 }
2685
2686 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2687 (chain_len * ring->rx_buf_len));
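	/* rxmax is capped at the smaller of the VSI max frame size and
	 * what a full buffer chain can hold
	 */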
2688 if (hw->revision_id == 0)
2689 rx_ctx.lrxqthresh = 0;
2690 else
2691 rx_ctx.lrxqthresh = 2;
2692 rx_ctx.crcstrip = 1;
2693 rx_ctx.l2tsel = 1;
2694 /* this controls whether VLAN is stripped from inner headers */
2695 rx_ctx.showiv = 0;
2696 #ifdef I40E_FCOE
2697 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2698 #endif
2699 /* set the prefena field to 1 because the manual says to */
2700 rx_ctx.prefena = 1;
2701
2702 /* clear the context in the HMC */
2703 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2704 if (err) {
2705 dev_info(&vsi->back->pdev->dev,
2706 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2707 ring->queue_index, pf_q, err);
2708 return -ENOMEM;
2709 }
2710
2711 /* set the context in the HMC */
2712 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2713 if (err) {
2714 dev_info(&vsi->back->pdev->dev,
2715 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2716 ring->queue_index, pf_q, err);
2717 return -ENOMEM;
2718 }
2719
2720 /* cache tail for quicker writes, and clear the reg before use */
2721 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2722 writel(0, ring->tail);
2723
2724 if (ring_is_ps_enabled(ring)) {
2725 i40e_alloc_rx_headers(ring);
2726 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
2727 } else {
2728 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
2729 }
2730
2731 return 0;
2732 }
2733
2734 /**
2735 * i40e_vsi_configure_tx - Configure the VSI for Tx
2736 * @vsi: VSI structure describing this set of rings and resources
2737 *
2738 * Configure the Tx VSI for operation.
2739 **/
2740 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2741 {
2742 int err = 0;
2743 u16 i;
2744
2745 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2746 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2747
2748 return err;
2749 }
2750
2751 /**
2752 * i40e_vsi_configure_rx - Configure the VSI for Rx
2753 * @vsi: the VSI being configured
2754 *
2755 * Configure the Rx VSI for operation.
2756 **/
2757 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2758 {
2759 int err = 0;
2760 u16 i;
2761
2762 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2763 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2764 + ETH_FCS_LEN + VLAN_HLEN;
2765 else
2766 vsi->max_frame = I40E_RXBUFFER_2048;
2767
2768 /* figure out correct receive buffer length */
2769 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2770 I40E_FLAG_RX_PS_ENABLED)) {
2771 case I40E_FLAG_RX_1BUF_ENABLED:
2772 vsi->rx_hdr_len = 0;
2773 vsi->rx_buf_len = vsi->max_frame;
2774 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2775 break;
2776 case I40E_FLAG_RX_PS_ENABLED:
2777 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2778 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2779 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2780 break;
2781 default:
2782 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2783 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2784 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2785 break;
2786 }
2787
2788 #ifdef I40E_FCOE
2789 /* setup rx buffer for FCoE */
2790 if ((vsi->type == I40E_VSI_FCOE) &&
2791 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2792 vsi->rx_hdr_len = 0;
2793 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2794 vsi->max_frame = I40E_RXBUFFER_3072;
2795 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2796 }
2797
2798 #endif /* I40E_FCOE */
2799 /* round up for the chip's needs */
2800 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2801 BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
2802 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2803 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
2804
2805 /* set up individual rings */
2806 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2807 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2808
2809 return err;
2810 }
2811
2812 /**
2813 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2814 * @vsi: ptr to the VSI
2815 **/
2816 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2817 {
2818 struct i40e_ring *tx_ring, *rx_ring;
2819 u16 qoffset, qcount;
2820 int i, n;
2821
2822 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2823 /* Reset the TC information */
2824 for (i = 0; i < vsi->num_queue_pairs; i++) {
2825 rx_ring = vsi->rx_rings[i];
2826 tx_ring = vsi->tx_rings[i];
2827 rx_ring->dcb_tc = 0;
2828 tx_ring->dcb_tc = 0;
2829 }
2830 }
2831
2832 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2833 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2834 continue;
2835
2836 qoffset = vsi->tc_config.tc_info[n].qoffset;
2837 qcount = vsi->tc_config.tc_info[n].qcount;
2838 for (i = qoffset; i < (qoffset + qcount); i++) {
2839 rx_ring = vsi->rx_rings[i];
2840 tx_ring = vsi->tx_rings[i];
2841 rx_ring->dcb_tc = n;
2842 tx_ring->dcb_tc = n;
2843 }
2844 }
2845 }
2846
2847 /**
2848 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2849 * @vsi: ptr to the VSI
2850 **/
2851 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2852 {
2853 if (vsi->netdev)
2854 i40e_set_rx_mode(vsi->netdev);
2855 }
2856
2857 /**
2858 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2859 * @vsi: Pointer to the targeted VSI
2860 *
2861 * This function replays onto the hw the hlist in which all the SB
2862 * Flow Director filters were saved.
2863 **/
2864 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2865 {
2866 struct i40e_fdir_filter *filter;
2867 struct i40e_pf *pf = vsi->back;
2868 struct hlist_node *node;
2869
2870 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2871 return;
2872
2873 hlist_for_each_entry_safe(filter, node,
2874 &pf->fdir_filter_list, fdir_node) {
2875 i40e_add_del_fdir(vsi, filter, true);
2876 }
2877 }
2878
2879 /**
2880 * i40e_vsi_configure - Set up the VSI for action
2881 * @vsi: the VSI being configured
2882 **/
2883 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2884 {
2885 int err;
2886
2887 i40e_set_vsi_rx_mode(vsi);
2888 i40e_restore_vlan(vsi);
2889 i40e_vsi_config_dcb_rings(vsi);
2890 err = i40e_vsi_configure_tx(vsi);
2891 if (!err)
2892 err = i40e_vsi_configure_rx(vsi);
2893
2894 return err;
2895 }
2896
2897 /**
2898 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2899 * @vsi: the VSI being configured
2900 **/
2901 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2902 {
2903 struct i40e_pf *pf = vsi->back;
2904 struct i40e_q_vector *q_vector;
2905 struct i40e_hw *hw = &pf->hw;
2906 u16 vector;
2907 int i, q;
2908 u32 val;
2909 u32 qp;
2910
2911 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2912 * and PFINT_LNKLSTn registers, e.g.:
2913 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2914 */
2915 qp = vsi->base_queue;
2916 vector = vsi->base_vector;
2917 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2918 q_vector = vsi->q_vectors[i];
2919 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2920 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2921 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2922 q_vector->rx.itr);
2923 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2924 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2925 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2926 q_vector->tx.itr);
2927
2928 /* Linked list for the queuepairs assigned to this vector */
2929 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2930 for (q = 0; q < q_vector->num_ringpairs; q++) {
2931 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2932 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2933 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2934 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2935 (I40E_QUEUE_TYPE_TX
2936 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2937
2938 wr32(hw, I40E_QINT_RQCTL(qp), val);
2939
2940 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2941 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2942 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2943 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2944 (I40E_QUEUE_TYPE_RX
2945 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
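	/* the Tx cause just programmed points at the next queue pair's
	 * Rx cause, continuing the linked list
	 */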
2946
2947 /* Terminate the linked list */
2948 if (q == (q_vector->num_ringpairs - 1))
2949 val |= (I40E_QUEUE_END_OF_LIST
2950 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2951
2952 wr32(hw, I40E_QINT_TQCTL(qp), val);
2953 qp++;
2954 }
2955 }
2956
2957 i40e_flush(hw);
2958 }
2959
2960 /**
2961 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2962 * @pf: board private structure
2963 **/
2964 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2965 {
2966 struct i40e_hw *hw = &pf->hw;
2967 u32 val;
2968
2969 /* clear things first */
2970 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2971 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2972
2973 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2974 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2975 I40E_PFINT_ICR0_ENA_GRST_MASK |
2976 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2977 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2978 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2979 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2980 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2981
2982 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
2983 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
2984
2985 if (pf->flags & I40E_FLAG_PTP)
2986 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2987
2988 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2989
2990 /* SW_ITR_IDX = 0, but don't change INTENA */
2991 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2992 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2993
2994 /* OTHER_ITR_IDX = 0 */
2995 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2996 }
2997
2998 /**
2999 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3000 * @vsi: the VSI being configured
3001 **/
3002 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3003 {
3004 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3005 struct i40e_pf *pf = vsi->back;
3006 struct i40e_hw *hw = &pf->hw;
3007 u32 val;
3008
3009 /* set the ITR configuration */
3010 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
3011 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3012 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3013 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
3014 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3015 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3016
3017 i40e_enable_misc_int_causes(pf);
3018
3019 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3020 wr32(hw, I40E_PFINT_LNKLST0, 0);
3021
3022 /* Associate the queue pair to the vector and enable the queue int */
3023 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3024 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3025 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3026
3027 wr32(hw, I40E_QINT_RQCTL(0), val);
3028
3029 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3030 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3031 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3032
3033 wr32(hw, I40E_QINT_TQCTL(0), val);
3034 i40e_flush(hw);
3035 }
3036
3037 /**
3038 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3039 * @pf: board private structure
3040 **/
3041 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3042 {
3043 struct i40e_hw *hw = &pf->hw;
3044
3045 wr32(hw, I40E_PFINT_DYN_CTL0,
3046 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3047 i40e_flush(hw);
3048 }
3049
3050 /**
3051 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3052 * @pf: board private structure
3053 **/
3054 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3055 {
3056 struct i40e_hw *hw = &pf->hw;
3057 u32 val;
3058
3059 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3060 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3061 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3062
3063 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3064 i40e_flush(hw);
3065 }
3066
3067 /**
3068 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
3069 * @vsi: pointer to a vsi
3070 * @vector: Hw Interrupt vector to enable, relative to the VSI's base_vector
3071 **/
3072 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
3073 {
3074 struct i40e_pf *pf = vsi->back;
3075 struct i40e_hw *hw = &pf->hw;
3076 u32 val;
3077
3078 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
3079 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3080 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3081 wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
3082 /* skip the flush */
3083 }
3084
3085 /**
3086 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3087 * @vsi: pointer to a vsi
3088 * @vector: disable a particular Hw Interrupt vector
3089 **/
3090 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3091 {
3092 struct i40e_pf *pf = vsi->back;
3093 struct i40e_hw *hw = &pf->hw;
3094 u32 val;
3095
3096 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
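	/* INTENA is deliberately left clear so the write below masks this vector */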
3097 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3098 i40e_flush(hw);
3099 }
3100
3101 /**
3102 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3103 * @irq: interrupt number
3104 * @data: pointer to a q_vector
3105 **/
3106 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3107 {
3108 struct i40e_q_vector *q_vector = data;
3109
3110 if (!q_vector->tx.ring && !q_vector->rx.ring)
3111 return IRQ_HANDLED;
3112
3113 napi_schedule(&q_vector->napi);
3114
3115 return IRQ_HANDLED;
3116 }
3117
3118 /**
3119 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3120 * @vsi: the VSI being configured
3121 * @basename: name for the vector
3122 *
3123 * Allocates MSI-X vectors and requests interrupts from the kernel.
3124 **/
3125 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3126 {
3127 int q_vectors = vsi->num_q_vectors;
3128 struct i40e_pf *pf = vsi->back;
3129 int base = vsi->base_vector;
3130 int rx_int_idx = 0;
3131 int tx_int_idx = 0;
3132 int vector, err;
3133
3134 for (vector = 0; vector < q_vectors; vector++) {
3135 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3136
3137 if (q_vector->tx.ring && q_vector->rx.ring) {
3138 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3139 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3140 tx_int_idx++;
3141 } else if (q_vector->rx.ring) {
3142 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3143 "%s-%s-%d", basename, "rx", rx_int_idx++);
3144 } else if (q_vector->tx.ring) {
3145 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3146 "%s-%s-%d", basename, "tx", tx_int_idx++);
3147 } else {
3148 /* skip this unused q_vector */
3149 continue;
3150 }
3151 err = request_irq(pf->msix_entries[base + vector].vector,
3152 vsi->irq_handler,
3153 0,
3154 q_vector->name,
3155 q_vector);
3156 if (err) {
3157 dev_info(&pf->pdev->dev,
3158 "%s: request_irq failed, error: %d\n",
3159 __func__, err);
3160 goto free_queue_irqs;
3161 }
3162 /* assign the mask for this irq */
3163 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3164 &q_vector->affinity_mask);
3165 }
3166
3167 vsi->irqs_ready = true;
3168 return 0;
3169
3170 free_queue_irqs:
3171 while (vector) {
3172 vector--;
3173 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3174 NULL);
3175 free_irq(pf->msix_entries[base + vector].vector,
3176 &(vsi->q_vectors[vector]));
3177 }
3178 return err;
3179 }
3180
3181 /**
3182 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3183 * @vsi: the VSI being un-configured
3184 **/
3185 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3186 {
3187 struct i40e_pf *pf = vsi->back;
3188 struct i40e_hw *hw = &pf->hw;
3189 int base = vsi->base_vector;
3190 int i;
3191
3192 for (i = 0; i < vsi->num_queue_pairs; i++) {
3193 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3194 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3195 }
3196
3197 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3198 for (i = vsi->base_vector;
3199 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3200 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3201
3202 i40e_flush(hw);
3203 for (i = 0; i < vsi->num_q_vectors; i++)
3204 synchronize_irq(pf->msix_entries[i + base].vector);
3205 } else {
3206 /* Legacy and MSI mode - this stops all interrupt handling */
3207 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3208 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3209 i40e_flush(hw);
3210 synchronize_irq(pf->pdev->irq);
3211 }
3212 }
3213
3214 /**
3215 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3216 * @vsi: the VSI being configured
3217 **/
3218 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3219 {
3220 struct i40e_pf *pf = vsi->back;
3221 int i;
3222
3223 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3224 for (i = 0; i < vsi->num_q_vectors; i++)
3225 i40e_irq_dynamic_enable(vsi, i);
3226 } else {
3227 i40e_irq_dynamic_enable_icr0(pf);
3228 }
3229
3230 i40e_flush(&pf->hw);
3231 return 0;
3232 }
3233
3234 /**
3235 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3236 * @pf: board private structure
3237 **/
3238 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3239 {
3240 /* Disable ICR 0 */
3241 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3242 i40e_flush(&pf->hw);
3243 }
3244
3245 /**
3246 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3247 * @irq: interrupt number
3248 * @data: pointer to the PF structure
3249 *
3250 * This is the handler used for all MSI/Legacy interrupts, and deals
3251 * with both queue and non-queue interrupts. This is also used in
3252 * MSIX mode to handle the non-queue interrupts.
3253 **/
3254 static irqreturn_t i40e_intr(int irq, void *data)
3255 {
3256 struct i40e_pf *pf = (struct i40e_pf *)data;
3257 struct i40e_hw *hw = &pf->hw;
3258 irqreturn_t ret = IRQ_NONE;
3259 u32 icr0, icr0_remaining;
3260 u32 val, ena_mask;
3261
3262 icr0 = rd32(hw, I40E_PFINT_ICR0);
3263 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3264
3265 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3266 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3267 goto enable_intr;
3268
3269 /* if interrupt but no bits showing, must be SWINT */
3270 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3271 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3272 pf->sw_int_count++;
3273
3274 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3275 (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3276 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3277 icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3278 dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
3279 }
3280
3281 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3282 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3283
3284 /* temporarily disable queue cause for NAPI processing */
3285 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3286 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3287 wr32(hw, I40E_QINT_RQCTL(0), qval);
3288
3289 qval = rd32(hw, I40E_QINT_TQCTL(0));
3290 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3291 wr32(hw, I40E_QINT_TQCTL(0), qval);
3292
3293 if (!test_bit(__I40E_DOWN, &pf->state))
3294 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3295 }
3296
3297 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3298 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3299 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3300 }
3301
3302 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3303 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3304 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3305 }
3306
3307 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3308 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3309 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3310 }
3311
3312 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3313 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3314 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3315 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3316 val = rd32(hw, I40E_GLGEN_RSTAT);
3317 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3318 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3319 if (val == I40E_RESET_CORER) {
3320 pf->corer_count++;
3321 } else if (val == I40E_RESET_GLOBR) {
3322 pf->globr_count++;
3323 } else if (val == I40E_RESET_EMPR) {
3324 pf->empr_count++;
3325 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3326 }
3327 }
3328
3329 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3330 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3331 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3332 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3333 rd32(hw, I40E_PFHMC_ERRORINFO),
3334 rd32(hw, I40E_PFHMC_ERRORDATA));
3335 }
3336
3337 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3338 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3339
3340 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3341 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3342 i40e_ptp_tx_hwtstamp(pf);
3343 }
3344 }
3345
3346 /* If a critical error is pending we have no choice but to reset the
3347 * device.
3348 * Report and mask out any remaining unexpected interrupts.
3349 */
3350 icr0_remaining = icr0 & ena_mask;
3351 if (icr0_remaining) {
3352 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3353 icr0_remaining);
3354 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3355 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3356 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3357 dev_info(&pf->pdev->dev, "device will be reset\n");
3358 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3359 i40e_service_event_schedule(pf);
3360 }
3361 ena_mask &= ~icr0_remaining;
3362 }
3363 ret = IRQ_HANDLED;
3364
3365 enable_intr:
3366 /* re-enable interrupt causes */
3367 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3368 if (!test_bit(__I40E_DOWN, &pf->state)) {
3369 i40e_service_event_schedule(pf);
3370 i40e_irq_dynamic_enable_icr0(pf);
3371 }
3372
3373 return ret;
3374 }
3375
3376 /**
3377 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3378 * @tx_ring: tx ring to clean
3379 * @budget: how many cleans we're allowed
3380 *
3381 * Returns true if there's any budget left (i.e. the clean is finished)
3382 **/
3383 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3384 {
3385 struct i40e_vsi *vsi = tx_ring->vsi;
3386 u16 i = tx_ring->next_to_clean;
3387 struct i40e_tx_buffer *tx_buf;
3388 struct i40e_tx_desc *tx_desc;
3389
3390 tx_buf = &tx_ring->tx_bi[i];
3391 tx_desc = I40E_TX_DESC(tx_ring, i);
3392 i -= tx_ring->count;
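	/* bias the index negative so ring wrap is detected with a simple
	 * zero check rather than a modulo on each pass
	 */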
3393
3394 do {
3395 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3396
3397 /* if next_to_watch is not set then there is no work pending */
3398 if (!eop_desc)
3399 break;
3400
3401 /* prevent any other reads prior to eop_desc */
3402 read_barrier_depends();
3403
3404 /* if the descriptor isn't done, no work yet to do */
3405 if (!(eop_desc->cmd_type_offset_bsz &
3406 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3407 break;
3408
3409 /* clear next_to_watch to prevent false hangs */
3410 tx_buf->next_to_watch = NULL;
3411
3412 tx_desc->buffer_addr = 0;
3413 tx_desc->cmd_type_offset_bsz = 0;
3414 /* move past filter desc */
3415 tx_buf++;
3416 tx_desc++;
3417 i++;
3418 if (unlikely(!i)) {
3419 i -= tx_ring->count;
3420 tx_buf = tx_ring->tx_bi;
3421 tx_desc = I40E_TX_DESC(tx_ring, 0);
3422 }
3423 /* unmap skb header data */
3424 dma_unmap_single(tx_ring->dev,
3425 dma_unmap_addr(tx_buf, dma),
3426 dma_unmap_len(tx_buf, len),
3427 DMA_TO_DEVICE);
3428 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3429 kfree(tx_buf->raw_buf);
3430
3431 tx_buf->raw_buf = NULL;
3432 tx_buf->tx_flags = 0;
3433 tx_buf->next_to_watch = NULL;
3434 dma_unmap_len_set(tx_buf, len, 0);
3435 tx_desc->buffer_addr = 0;
3436 tx_desc->cmd_type_offset_bsz = 0;
3437
3438 /* move us past the eop_desc for start of next FD desc */
3439 tx_buf++;
3440 tx_desc++;
3441 i++;
3442 if (unlikely(!i)) {
3443 i -= tx_ring->count;
3444 tx_buf = tx_ring->tx_bi;
3445 tx_desc = I40E_TX_DESC(tx_ring, 0);
3446 }
3447
3448 /* update budget accounting */
3449 budget--;
3450 } while (likely(budget));
3451
3452 i += tx_ring->count;
3453 tx_ring->next_to_clean = i;
3454
3455 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3456 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
3457 }
3458 return budget > 0;
3459 }
3460
3461 /**
3462 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3463 * @irq: interrupt number
3464 * @data: pointer to a q_vector
3465 **/
3466 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3467 {
3468 struct i40e_q_vector *q_vector = data;
3469 struct i40e_vsi *vsi;
3470
3471 if (!q_vector->tx.ring)
3472 return IRQ_HANDLED;
3473
3474 vsi = q_vector->tx.ring->vsi;
3475 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3476
3477 return IRQ_HANDLED;
3478 }
3479
3480 /**
3481 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3482 * @vsi: the VSI being configured
3483 * @v_idx: vector index
3484 * @qp_idx: queue pair index
3485 **/
3486 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3487 {
3488 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3489 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3490 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3491
3492 tx_ring->q_vector = q_vector;
3493 tx_ring->next = q_vector->tx.ring;
3494 q_vector->tx.ring = tx_ring;
3495 q_vector->tx.count++;
3496
3497 rx_ring->q_vector = q_vector;
3498 rx_ring->next = q_vector->rx.ring;
3499 q_vector->rx.ring = rx_ring;
3500 q_vector->rx.count++;
3501 }
3502
3503 /**
3504 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3505 * @vsi: the VSI being configured
3506 *
3507 * This function maps descriptor rings to the queue-specific vectors
3508 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3509 * one vector per queue pair, but on a constrained vector budget, we
3510 * group the queue pairs as "efficiently" as possible.
3511 **/
3512 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3513 {
3514 int qp_remaining = vsi->num_queue_pairs;
3515 int q_vectors = vsi->num_q_vectors;
3516 int num_ringpairs;
3517 int v_start = 0;
3518 int qp_idx = 0;
3519
3520 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3521 * group them so there are multiple queues per vector.
3522 * It is also important to go through all the vectors available to be
3523 * sure that, if we don't use them all, the remaining vectors are
3524 * cleared. This is especially important when decreasing the
3525 * number of queues in use.
3526 */
3527 for (; v_start < q_vectors; v_start++) {
3528 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3529
3530 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
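		/* spread the remaining queue pairs as evenly as possible
		 * across the vectors that are still unassigned
		 */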
3531
3532 q_vector->num_ringpairs = num_ringpairs;
3533
3534 q_vector->rx.count = 0;
3535 q_vector->tx.count = 0;
3536 q_vector->rx.ring = NULL;
3537 q_vector->tx.ring = NULL;
3538
3539 while (num_ringpairs--) {
3540 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3541 qp_idx++;
3542 qp_remaining--;
3543 }
3544 }
3545 }
3546
3547 /**
3548 * i40e_vsi_request_irq - Request IRQ from the OS
3549 * @vsi: the VSI being configured
3550 * @basename: name for the vector
3551 **/
3552 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3553 {
3554 struct i40e_pf *pf = vsi->back;
3555 int err;
3556
3557 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3558 err = i40e_vsi_request_irq_msix(vsi, basename);
3559 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3560 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3561 pf->int_name, pf);
3562 else
3563 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3564 pf->int_name, pf);
3565
3566 if (err)
3567 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3568
3569 return err;
3570 }
3571
3572 #ifdef CONFIG_NET_POLL_CONTROLLER
3573 /**
3574 * i40e_netpoll - A Polling 'interrupt' handler
3575 * @netdev: network interface device structure
3576 *
3577 * This is used by netconsole to send skbs without having to re-enable
3578 * interrupts. It's not called while the normal interrupt routine is executing.
3579 **/
3580 #ifdef I40E_FCOE
3581 void i40e_netpoll(struct net_device *netdev)
3582 #else
3583 static void i40e_netpoll(struct net_device *netdev)
3584 #endif
3585 {
3586 struct i40e_netdev_priv *np = netdev_priv(netdev);
3587 struct i40e_vsi *vsi = np->vsi;
3588 struct i40e_pf *pf = vsi->back;
3589 int i;
3590
3591 /* if interface is down do nothing */
3592 if (test_bit(__I40E_DOWN, &vsi->state))
3593 return;
3594
3595 pf->flags |= I40E_FLAG_IN_NETPOLL;
3596 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3597 for (i = 0; i < vsi->num_q_vectors; i++)
3598 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3599 } else {
3600 i40e_intr(pf->pdev->irq, netdev);
3601 }
3602 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3603 }
3604 #endif
3605
3606 /**
3607 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3608 * @pf: the PF being configured
3609 * @pf_q: the PF queue
3610 * @enable: enable or disable state of the queue
3611 *
3612 * This routine will wait for the given Tx queue of the PF to reach the
3613 * enabled or disabled state.
3614 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3615 * multiple retries; else will return 0 in case of success.
3616 **/
3617 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3618 {
3619 int i;
3620 u32 tx_reg;
3621
3622 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3623 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3624 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3625 break;
3626
3627 usleep_range(10, 20);
3628 }
3629 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3630 return -ETIMEDOUT;
3631
3632 return 0;
3633 }
3634
3635 /**
3636 * i40e_vsi_control_tx - Start or stop a VSI's rings
3637 * @vsi: the VSI being configured
3638 * @enable: start or stop the rings
3639 **/
3640 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3641 {
3642 struct i40e_pf *pf = vsi->back;
3643 struct i40e_hw *hw = &pf->hw;
3644 int i, j, pf_q, ret = 0;
3645 u32 tx_reg;
3646
3647 pf_q = vsi->base_queue;
3648 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3649
3650 /* warn the TX unit of coming changes */
3651 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3652 if (!enable)
3653 usleep_range(10, 20);
3654
3655 for (j = 0; j < 50; j++) {
3656 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3657 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3658 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3659 break;
3660 usleep_range(1000, 2000);
3661 }
3662 /* Skip if the queue is already in the requested state */
3663 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3664 continue;
3665
3666 /* turn on/off the queue */
3667 if (enable) {
3668 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3669 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3670 } else {
3671 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3672 }
3673
3674 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3675 /* No waiting for the Tx queue to disable */
3676 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3677 continue;
3678
3679 /* wait for the change to finish */
3680 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3681 if (ret) {
3682 dev_info(&pf->pdev->dev,
3683 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3684 __func__, vsi->seid, pf_q,
3685 (enable ? "en" : "dis"));
3686 break;
3687 }
3688 }
3689
3690 if (hw->revision_id == 0)
3691 mdelay(50);
3692 return ret;
3693 }
3694
3695 /**
3696 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3697 * @pf: the PF being configured
3698 * @pf_q: the PF queue
3699 * @enable: enable or disable state of the queue
3700 *
3701 * This routine will wait for the given Rx queue of the PF to reach the
3702 * enabled or disabled state.
3703 * Returns -ETIMEDOUT if the queue fails to reach the requested state
3704 * after multiple retries; otherwise returns 0 on success.
3705 **/
3706 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3707 {
3708 int i;
3709 u32 rx_reg;
3710
3711 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3712 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3713 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3714 break;
3715
3716 usleep_range(10, 20);
3717 }
3718 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3719 return -ETIMEDOUT;
3720
3721 return 0;
3722 }
3723
3724 /**
3725 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3726 * @vsi: the VSI being configured
3727 * @enable: start or stop the rings
3728 **/
3729 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3730 {
3731 struct i40e_pf *pf = vsi->back;
3732 struct i40e_hw *hw = &pf->hw;
3733 int i, j, pf_q, ret = 0;
3734 u32 rx_reg;
3735
3736 pf_q = vsi->base_queue;
3737 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3738 for (j = 0; j < 50; j++) {
3739 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3740 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3741 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3742 break;
3743 usleep_range(1000, 2000);
3744 }
3745
3746 /* Skip if the queue is already in the requested state */
3747 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3748 continue;
3749
3750 /* turn on/off the queue */
3751 if (enable)
3752 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3753 else
3754 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3755 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3756
3757 /* wait for the change to finish */
3758 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3759 if (ret) {
3760 dev_info(&pf->pdev->dev,
3761 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3762 __func__, vsi->seid, pf_q,
3763 (enable ? "en" : "dis"));
3764 break;
3765 }
3766 }
3767
3768 return ret;
3769 }
3770
3771 /**
3772 * i40e_vsi_control_rings - Start or stop a VSI's rings
3773 * @vsi: the VSI being configured
3774 * @request: true to start the rings, false to stop them
3775 **/
3776 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3777 {
3778 int ret = 0;
3779
3780 /* do rx first for enable and last for disable */
3781 if (request) {
3782 ret = i40e_vsi_control_rx(vsi, request);
3783 if (ret)
3784 return ret;
3785 ret = i40e_vsi_control_tx(vsi, request);
3786 } else {
3787 /* Ignore return value, we need to shutdown whatever we can */
3788 i40e_vsi_control_tx(vsi, request);
3789 i40e_vsi_control_rx(vsi, request);
3790 }
3791
3792 return ret;
3793 }
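
/* Usage sketch (illustrative only):
 *
 *	ret = i40e_vsi_control_rings(vsi, true);	start Rx, then Tx
 *	...
 *	i40e_vsi_control_rings(vsi, false);		stop Tx, then Rx
 *
 * The disable path deliberately ignores errors so that as much of the
 * data path as possible is shut down.
 */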
3794
3795 /**
3796 * i40e_vsi_free_irq - Free the irq association with the OS
3797 * @vsi: the VSI being configured
3798 **/
3799 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3800 {
3801 struct i40e_pf *pf = vsi->back;
3802 struct i40e_hw *hw = &pf->hw;
3803 int base = vsi->base_vector;
3804 u32 val, qp;
3805 int i;
3806
3807 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3808 if (!vsi->q_vectors)
3809 return;
3810
3811 if (!vsi->irqs_ready)
3812 return;
3813
3814 vsi->irqs_ready = false;
3815 for (i = 0; i < vsi->num_q_vectors; i++) {
3816 u16 vector = i + base;
3817
3818 /* free only the irqs that were actually requested */
3819 if (!vsi->q_vectors[i] ||
3820 !vsi->q_vectors[i]->num_ringpairs)
3821 continue;
3822
3823 /* clear the affinity_mask in the IRQ descriptor */
3824 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3825 NULL);
3826 free_irq(pf->msix_entries[vector].vector,
3827 vsi->q_vectors[i]);
3828
3829 /* Tear down the interrupt queue link list
3830 *
3831 * We know that they come in pairs and always
3832 * the Rx first, then the Tx. To clear the
3833 * link list, stick the EOL value into the
3834 * next_q field of the registers.
3835 */
3836 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3837 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3838 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3839 val |= I40E_QUEUE_END_OF_LIST
3840 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3841 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3842
3843 while (qp != I40E_QUEUE_END_OF_LIST) {
3844 u32 next;
3845
3846 val = rd32(hw, I40E_QINT_RQCTL(qp));
3847
3848 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3849 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3850 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3851 I40E_QINT_RQCTL_INTEVENT_MASK);
3852
3853 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3854 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3855
3856 wr32(hw, I40E_QINT_RQCTL(qp), val);
3857
3858 val = rd32(hw, I40E_QINT_TQCTL(qp));
3859
3860 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3861 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3862
3863 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3864 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3865 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3866 I40E_QINT_TQCTL_INTEVENT_MASK);
3867
3868 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3869 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3870
3871 wr32(hw, I40E_QINT_TQCTL(qp), val);
3872 qp = next;
3873 }
3874 }
3875 } else {
3876 free_irq(pf->pdev->irq, pf);
3877
3878 val = rd32(hw, I40E_PFINT_LNKLST0);
3879 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3880 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3881 val |= I40E_QUEUE_END_OF_LIST
3882 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3883 wr32(hw, I40E_PFINT_LNKLST0, val);
3884
3885 val = rd32(hw, I40E_QINT_RQCTL(qp));
3886 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3887 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3888 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3889 I40E_QINT_RQCTL_INTEVENT_MASK);
3890
3891 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3892 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3893
3894 wr32(hw, I40E_QINT_RQCTL(qp), val);
3895
3896 val = rd32(hw, I40E_QINT_TQCTL(qp));
3897
3898 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3899 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3900 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3901 I40E_QINT_TQCTL_INTEVENT_MASK);
3902
3903 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3904 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3905
3906 wr32(hw, I40E_QINT_TQCTL(qp), val);
3907 }
3908 }
3909
3910 /**
3911 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3912 * @vsi: the VSI being configured
3913 * @v_idx: Index of vector to be freed
3914 *
3915 * This function frees the memory allocated to the q_vector. In addition if
3916 * NAPI is enabled it will delete any references to the NAPI struct prior
3917 * to freeing the q_vector.
3918 **/
3919 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3920 {
3921 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3922 struct i40e_ring *ring;
3923
3924 if (!q_vector)
3925 return;
3926
3927 /* disassociate q_vector from rings */
3928 i40e_for_each_ring(ring, q_vector->tx)
3929 ring->q_vector = NULL;
3930
3931 i40e_for_each_ring(ring, q_vector->rx)
3932 ring->q_vector = NULL;
3933
3934 /* only VSI w/ an associated netdev is set up w/ NAPI */
3935 if (vsi->netdev)
3936 netif_napi_del(&q_vector->napi);
3937
3938 vsi->q_vectors[v_idx] = NULL;
3939
3940 kfree_rcu(q_vector, rcu);
3941 }
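
/* Note: the q_vector is released with kfree_rcu() rather than kfree()
 * because RCU readers (e.g. napi/busy-poll paths) may still hold a
 * reference to it; the actual free is deferred past a grace period.
 */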
3942
3943 /**
3944 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3945 * @vsi: the VSI being un-configured
3946 *
3947 * This frees the memory allocated to the q_vectors and
3948 * deletes references to the NAPI struct.
3949 **/
3950 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3951 {
3952 int v_idx;
3953
3954 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3955 i40e_free_q_vector(vsi, v_idx);
3956 }
3957
3958 /**
3959 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3960 * @pf: board private structure
3961 **/
3962 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3963 {
3964 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3965 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3966 pci_disable_msix(pf->pdev);
3967 kfree(pf->msix_entries);
3968 pf->msix_entries = NULL;
3969 kfree(pf->irq_pile);
3970 pf->irq_pile = NULL;
3971 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3972 pci_disable_msi(pf->pdev);
3973 }
3974 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3975 }
3976
3977 /**
3978 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3979 * @pf: board private structure
3980 *
3981 * We go through and clear interrupt specific resources and reset the structure
3982 * to pre-load conditions
3983 **/
3984 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3985 {
3986 int i;
3987
3988 i40e_stop_misc_vector(pf);
3989 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3990 synchronize_irq(pf->msix_entries[0].vector);
3991 free_irq(pf->msix_entries[0].vector, pf);
3992 }
3993
3994 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3995 for (i = 0; i < pf->num_alloc_vsi; i++)
3996 if (pf->vsi[i])
3997 i40e_vsi_free_q_vectors(pf->vsi[i]);
3998 i40e_reset_interrupt_capability(pf);
3999 }
4000
4001 /**
4002 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4003 * @vsi: the VSI being configured
4004 **/
4005 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4006 {
4007 int q_idx;
4008
4009 if (!vsi->netdev)
4010 return;
4011
4012 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4013 napi_enable(&vsi->q_vectors[q_idx]->napi);
4014 }
4015
4016 /**
4017 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4018 * @vsi: the VSI being configured
4019 **/
4020 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4021 {
4022 int q_idx;
4023
4024 if (!vsi->netdev)
4025 return;
4026
4027 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4028 napi_disable(&vsi->q_vectors[q_idx]->napi);
4029 }
4030
4031 /**
4032 * i40e_vsi_close - Shut down a VSI
4033 * @vsi: the vsi to be quelled
4034 **/
4035 static void i40e_vsi_close(struct i40e_vsi *vsi)
4036 {
4037 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4038 i40e_down(vsi);
4039 i40e_vsi_free_irq(vsi);
4040 i40e_vsi_free_tx_resources(vsi);
4041 i40e_vsi_free_rx_resources(vsi);
4042 vsi->current_netdev_flags = 0;
4043 }
4044
4045 /**
4046 * i40e_quiesce_vsi - Pause a given VSI
4047 * @vsi: the VSI being paused
4048 **/
4049 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4050 {
4051 if (test_bit(__I40E_DOWN, &vsi->state))
4052 return;
4053
4054 /* No need to disable FCoE VSI when Tx suspended */
4055 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4056 vsi->type == I40E_VSI_FCOE) {
4057 dev_dbg(&vsi->back->pdev->dev,
4058 "%s: VSI seid %d skipping FCoE VSI disable\n",
4059 __func__, vsi->seid);
4060 return;
4061 }
4062
4063 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4064 if (vsi->netdev && netif_running(vsi->netdev)) {
4065 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4066 } else {
4067 i40e_vsi_close(vsi);
4068 }
4069 }
4070
4071 /**
4072 * i40e_unquiesce_vsi - Resume a given VSI
4073 * @vsi: the VSI being resumed
4074 **/
4075 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4076 {
4077 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4078 return;
4079
4080 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4081 if (vsi->netdev && netif_running(vsi->netdev))
4082 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4083 else
4084 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4085 }
4086
4087 /**
4088 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4089 * @pf: the PF
4090 **/
4091 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4092 {
4093 int v;
4094
4095 for (v = 0; v < pf->num_alloc_vsi; v++) {
4096 if (pf->vsi[v])
4097 i40e_quiesce_vsi(pf->vsi[v]);
4098 }
4099 }
4100
4101 /**
4102 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4103 * @pf: the PF
4104 **/
4105 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4106 {
4107 int v;
4108
4109 for (v = 0; v < pf->num_alloc_vsi; v++) {
4110 if (pf->vsi[v])
4111 i40e_unquiesce_vsi(pf->vsi[v]);
4112 }
4113 }
4114
4115 #ifdef CONFIG_I40E_DCB
4116 /**
4117 * i40e_vsi_wait_txq_disabled - Wait for a VSI's Tx queues to be disabled
4118 * @vsi: the VSI being configured
4119 *
4120 * This function waits for the given VSI's Tx queues to be disabled.
4121 **/
4122 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4123 {
4124 struct i40e_pf *pf = vsi->back;
4125 int i, pf_q, ret;
4126
4127 pf_q = vsi->base_queue;
4128 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4129 /* Check and wait for the disable status of the queue */
4130 ret = i40e_pf_txq_wait(pf, pf_q, false);
4131 if (ret) {
4132 dev_info(&pf->pdev->dev,
4133 "%s: VSI seid %d Tx ring %d disable timeout\n",
4134 __func__, vsi->seid, pf_q);
4135 return ret;
4136 }
4137 }
4138
4139 return 0;
4140 }
4141
4142 /**
4143 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4144 * @pf: the PF
4145 *
4146 * This function waits for the Tx queues to be in disabled state for all the
4147 * VSIs that are managed by this PF.
4148 **/
4149 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4150 {
4151 int v, ret = 0;
4152
4153 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4154 /* No need to wait for FCoE VSI queues */
4155 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4156 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4157 if (ret)
4158 break;
4159 }
4160 }
4161
4162 return ret;
4163 }
4164
4165 #endif
4166
4167 /**
4168 * i40e_detect_recover_hung_queue - Detect and recover a hung Tx queue
4169 * @q_idx: Tx queue number
4170 * @vsi: Pointer to VSI struct
4171 *
4172 * This function checks the specified Tx queue of the given VSI for a hung
4173 * condition. Detection is a two-step process: this pass sets the 'hung'
4174 * bit, and if napi_poll runs before the next service task pass it clears
4175 * the bit for the respective q_vector. If the bit is still set on the
4176 * subsequent pass, this function issues a SW interrupt to recover.
4177 **/
4178 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4179 {
4180 struct i40e_ring *tx_ring = NULL;
4181 struct i40e_pf *pf;
4182 u32 head, val, tx_pending;
4183 int i;
4184
4185 pf = vsi->back;
4186
4187 /* now that we have an index, find the tx_ring struct */
4188 for (i = 0; i < vsi->num_queue_pairs; i++) {
4189 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4190 if (q_idx == vsi->tx_rings[i]->queue_index) {
4191 tx_ring = vsi->tx_rings[i];
4192 break;
4193 }
4194 }
4195 }
4196
4197 if (!tx_ring)
4198 return;
4199
4200 /* Read interrupt register */
4201 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4202 val = rd32(&pf->hw,
4203 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4204 tx_ring->vsi->base_vector - 1));
4205 else
4206 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4207
4208 head = i40e_get_head(tx_ring);
4209
4210 tx_pending = i40e_get_tx_pending(tx_ring);
4211
4212 /* If interrupts are disabled and Tx work is still pending,
4213 * trigger the SW interrupt right away (don't wait). Worst case
4214 * there will be one extra interrupt that finds nothing to
4215 * clean because the queues were already cleaned.
4216 */
4217 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4218 i40e_force_wb(vsi, tx_ring->q_vector);
4219 }
4220
4221 /**
4222 * i40e_detect_recover_hung - Detect and recover hung queues
4223 * @pf: pointer to PF struct
4224 *
4225 * The LAN VSI has a netdev, and the netdev has Tx queues. This function
4226 * checks each of those Tx queues and, if one appears hung, triggers
4227 * recovery by issuing a SW interrupt.
4228 **/
4229 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4230 {
4231 struct net_device *netdev;
4232 struct i40e_vsi *vsi;
4233 int i;
4234
4235 /* Only for LAN VSI */
4236 vsi = pf->vsi[pf->lan_vsi];
4237
4238 if (!vsi)
4239 return;
4240
4241 /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4242 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4243 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4244 return;
4245
4246 /* Make sure type is MAIN VSI */
4247 if (vsi->type != I40E_VSI_MAIN)
4248 return;
4249
4250 netdev = vsi->netdev;
4251 if (!netdev)
4252 return;
4253
4254 /* Bail out if netif_carrier is not OK */
4255 if (!netif_carrier_ok(netdev))
4256 return;
4257
4258 /* Go through the netdev's Tx queues */
4259 for (i = 0; i < netdev->num_tx_queues; i++) {
4260 struct netdev_queue *q;
4261
4262 q = netdev_get_tx_queue(netdev, i);
4263 if (q)
4264 i40e_detect_recover_hung_queue(i, vsi);
4265 }
4266 }
4267
4268 /**
4269 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4270 * @pf: pointer to PF
4271 *
4272 * Get the TC map for an iSCSI-enabled PF; it will include the iSCSI TC
4273 * as well as the LAN TC.
4274 **/
4275 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4276 {
4277 struct i40e_dcb_app_priority_table app;
4278 struct i40e_hw *hw = &pf->hw;
4279 u8 enabled_tc = 1; /* TC0 is always enabled */
4280 u8 tc, i;
4281 /* Get the iSCSI APP TLV */
4282 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4283
4284 for (i = 0; i < dcbcfg->numapps; i++) {
4285 app = dcbcfg->app[i];
4286 if (app.selector == I40E_APP_SEL_TCPIP &&
4287 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4288 tc = dcbcfg->etscfg.prioritytable[app.priority];
4289 enabled_tc |= BIT_ULL(tc);
4290 break;
4291 }
4292 }
4293
4294 return enabled_tc;
4295 }
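
/* Worked example (illustrative only): if the iSCSI APP TLV carries
 * priority 4 and dcbcfg->etscfg.prioritytable[4] == 2, this returns
 * 0x5, i.e. TC0 (always enabled) plus TC2 for iSCSI traffic.
 */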
4296
4297 /**
4298 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4299 * @dcbcfg: the corresponding DCBx configuration structure
4300 *
4301 * Return the number of TCs from given DCBx configuration
4302 **/
4303 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4304 {
4305 u8 num_tc = 0;
4306 int i;
4307
4308 /* Scan the ETS Config Priority Table to find
4309 * traffic class enabled for a given priority
4310 * and use the traffic class index to get the
4311 * number of traffic classes enabled
4312 */
4313 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4314 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4315 num_tc = dcbcfg->etscfg.prioritytable[i];
4316 }
4317
4318 /* Traffic class index starts from zero so
4319 * increment to return the actual count
4320 */
4321 return num_tc + 1;
4322 }
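
/* Worked example (illustrative only): a prioritytable of
 * {0, 0, 1, 1, 2, 2, 0, 0} has a highest TC index of 2, so this
 * function reports 3 traffic classes in use.
 */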
4323
4324 /**
4325 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4326 * @dcbcfg: the corresponding DCBx configuration structure
4327 *
4328 * Query the current DCB configuration and return the number of
4329 * traffic classes enabled from the given DCBX config
4330 **/
4331 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4332 {
4333 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4334 u8 enabled_tc = 1;
4335 u8 i;
4336
4337 for (i = 0; i < num_tc; i++)
4338 enabled_tc |= BIT(i);
4339
4340 return enabled_tc;
4341 }
4342
4343 /**
4344 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4345 * @pf: PF being queried
4346 *
4347 * Return number of traffic classes enabled for the given PF
4348 **/
4349 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4350 {
4351 struct i40e_hw *hw = &pf->hw;
4352 u8 i, enabled_tc;
4353 u8 num_tc = 0;
4354 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4355
4356 /* If DCB is not enabled then always in single TC */
4357 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4358 return 1;
4359
4360 /* SFP mode will be enabled for all TCs on port */
4361 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4362 return i40e_dcb_get_num_tc(dcbcfg);
4363
4364 /* MFP mode return count of enabled TCs for this PF */
4365 if (pf->hw.func_caps.iscsi)
4366 enabled_tc = i40e_get_iscsi_tc_map(pf);
4367 else
4368 return 1; /* Only TC0 */
4369
4370 /* At least have TC0 */
4371 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4372 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4373 if (enabled_tc & BIT_ULL(i))
4374 num_tc++;
4375 }
4376 return num_tc;
4377 }
4378
4379 /**
4380 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4381 * @pf: PF being queried
4382 *
4383 * Return a bitmap for first enabled traffic class for this PF.
4384 **/
4385 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4386 {
4387 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4388 u8 i = 0;
4389
4390 if (!enabled_tc)
4391 return 0x1; /* TC0 */
4392
4393 /* Find the first enabled TC */
4394 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4395 if (enabled_tc & BIT_ULL(i))
4396 break;
4397 }
4398
4399 return BIT(i);
4400 }
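
/* Worked example (illustrative only): with enabled_tcmap == 0x6 the
 * first enabled TC is bit 1, so this returns BIT(1) == 0x2; with no
 * TCs enabled it falls back to 0x1 (TC0).
 */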
4401
4402 /**
4403 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4404 * @pf: PF being queried
4405 *
4406 * Return a bitmap for enabled traffic classes for this PF.
4407 **/
4408 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4409 {
4410 /* If DCB is not enabled for this PF then just return default TC */
4411 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4412 return i40e_pf_get_default_tc(pf);
4413
4414 /* In SFP mode we want the PF to be enabled for all TCs */
4415 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4416 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4417
4418 /* MFP enabled and iSCSI PF type */
4419 if (pf->hw.func_caps.iscsi)
4420 return i40e_get_iscsi_tc_map(pf);
4421 else
4422 return i40e_pf_get_default_tc(pf);
4423 }
4424
4425 /**
4426 * i40e_vsi_get_bw_info - Query VSI BW Information
4427 * @vsi: the VSI being queried
4428 *
4429 * Returns 0 on success, negative value on failure
4430 **/
4431 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4432 {
4433 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4434 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4435 struct i40e_pf *pf = vsi->back;
4436 struct i40e_hw *hw = &pf->hw;
4437 i40e_status ret;
4438 u32 tc_bw_max;
4439 int i;
4440
4441 /* Get the VSI level BW configuration */
4442 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4443 if (ret) {
4444 dev_info(&pf->pdev->dev,
4445 "couldn't get PF vsi bw config, err %s aq_err %s\n",
4446 i40e_stat_str(&pf->hw, ret),
4447 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4448 return -EINVAL;
4449 }
4450
4451 /* Get the VSI level BW configuration per TC */
4452 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4453 NULL);
4454 if (ret) {
4455 dev_info(&pf->pdev->dev,
4456 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4457 i40e_stat_str(&pf->hw, ret),
4458 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4459 return -EINVAL;
4460 }
4461
4462 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4463 dev_info(&pf->pdev->dev,
4464 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4465 bw_config.tc_valid_bits,
4466 bw_ets_config.tc_valid_bits);
4467 /* Still continuing despite the mismatch */
4468 }
4469
4470 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4471 vsi->bw_max_quanta = bw_config.max_bw;
4472 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4473 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4474 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4475 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4476 vsi->bw_ets_limit_credits[i] =
4477 le16_to_cpu(bw_ets_config.credits[i]);
4478 /* 3 bits out of 4 for each TC */
4479 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4480 }
4481
4482 return 0;
4483 }
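
/* Note on the tc_bw_max unpacking above (values illustrative): each TC
 * owns a 4-bit nibble of which only the low 3 bits are meaningful,
 * hence the "& 0x7". With tc_bw_max == 0x00000021, for example, TC0's
 * max quanta would be 1 and TC1's would be 2.
 */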
4484
4485 /**
4486 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4487 * @vsi: the VSI being configured
4488 * @enabled_tc: TC bitmap
4489 * @bw_credits: BW shared credits per TC
4490 *
4491 * Returns 0 on success, negative value on failure
4492 **/
4493 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4494 u8 *bw_share)
4495 {
4496 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4497 i40e_status ret;
4498 int i;
4499
4500 bw_data.tc_valid_bits = enabled_tc;
4501 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4502 bw_data.tc_bw_credits[i] = bw_share[i];
4503
4504 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4505 NULL);
4506 if (ret) {
4507 dev_info(&vsi->back->pdev->dev,
4508 "AQ command Config VSI BW allocation per TC failed = %d\n",
4509 vsi->back->hw.aq.asq_last_status);
4510 return -EINVAL;
4511 }
4512
4513 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4514 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4515
4516 return 0;
4517 }
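
/* Usage sketch (illustrative only): give two enabled TCs an equal
 * relative share:
 *
 *	u8 share[I40E_MAX_TRAFFIC_CLASS] = {0};
 *
 *	share[0] = 1;
 *	share[3] = 1;
 *	ret = i40e_vsi_configure_bw_alloc(vsi, BIT(0) | BIT(3), share);
 *
 * Credits are relative, so equal non-zero values split the bandwidth
 * evenly, which is exactly what i40e_vsi_config_tc() below does for
 * all enabled TCs.
 */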
4518
4519 /**
4520 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4521 * @vsi: the VSI being configured
4522 * @enabled_tc: TC map to be enabled
4523 *
4524 **/
4525 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4526 {
4527 struct net_device *netdev = vsi->netdev;
4528 struct i40e_pf *pf = vsi->back;
4529 struct i40e_hw *hw = &pf->hw;
4530 u8 netdev_tc = 0;
4531 int i;
4532 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4533
4534 if (!netdev)
4535 return;
4536
4537 if (!enabled_tc) {
4538 netdev_reset_tc(netdev);
4539 return;
4540 }
4541
4542 /* Set up actual enabled TCs on the VSI */
4543 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4544 return;
4545
4546 /* set per TC queues for the VSI */
4547 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4548 /* Only set TC queues for enabled tcs
4549 *
4550 * e.g. for a VSI that has TC0 and TC3 enabled the
4551 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
4552 * will set numtc for the netdev to 2, and the netdev layer
4553 * will reference them as TC 0 and 1.
4554 */
4555 if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4556 netdev_set_tc_queue(netdev,
4557 vsi->tc_config.tc_info[i].netdev_tc,
4558 vsi->tc_config.tc_info[i].qcount,
4559 vsi->tc_config.tc_info[i].qoffset);
4560 }
4561
4562 /* Assign UP2TC map for the VSI */
4563 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4564 /* Get the actual TC# for the UP */
4565 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4566 /* Get the mapped netdev TC# for the UP */
4567 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4568 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4569 }
4570 }
4571
4572 /**
4573 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4574 * @vsi: the VSI being configured
4575 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4576 **/
4577 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4578 struct i40e_vsi_context *ctxt)
4579 {
4580 /* copy just the sections touched not the entire info
4581 * since not all sections are valid as returned by
4582 * update vsi params
4583 */
4584 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4585 memcpy(&vsi->info.queue_mapping,
4586 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4587 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4588 sizeof(vsi->info.tc_mapping));
4589 }
4590
4591 /**
4592 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4593 * @vsi: VSI to be configured
4594 * @enabled_tc: TC bitmap
4595 *
4596 * This configures a particular VSI for TCs that are mapped to the
4597 * given TC bitmap. It uses default bandwidth share for TCs across
4598 * VSIs to configure TC for a particular VSI.
4599 *
4600 * NOTE:
4601 * It is expected that the VSI queues have been quiesced before calling
4602 * this function.
4603 **/
4604 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4605 {
4606 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4607 struct i40e_vsi_context ctxt;
4608 int ret = 0;
4609 int i;
4610
4611 /* Check if enabled_tc is same as existing or new TCs */
4612 if (vsi->tc_config.enabled_tc == enabled_tc)
4613 return ret;
4614
4615 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4616 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4617 if (enabled_tc & BIT_ULL(i))
4618 bw_share[i] = 1;
4619 }
4620
4621 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4622 if (ret) {
4623 dev_info(&vsi->back->pdev->dev,
4624 "Failed configuring TC map %d for VSI %d\n",
4625 enabled_tc, vsi->seid);
4626 goto out;
4627 }
4628
4629 /* Update Queue Pairs Mapping for currently enabled UPs */
4630 ctxt.seid = vsi->seid;
4631 ctxt.pf_num = vsi->back->hw.pf_id;
4632 ctxt.vf_num = 0;
4633 ctxt.uplink_seid = vsi->uplink_seid;
4634 ctxt.info = vsi->info;
4635 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4636
4637 /* Update the VSI after updating the VSI queue-mapping information */
4638 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4639 if (ret) {
4640 dev_info(&vsi->back->pdev->dev,
4641 "Update vsi tc config failed, err %s aq_err %s\n",
4642 i40e_stat_str(&vsi->back->hw, ret),
4643 i40e_aq_str(&vsi->back->hw,
4644 vsi->back->hw.aq.asq_last_status));
4645 goto out;
4646 }
4647 /* update the local VSI info with updated queue map */
4648 i40e_vsi_update_queue_map(vsi, &ctxt);
4649 vsi->info.valid_sections = 0;
4650
4651 /* Update current VSI BW information */
4652 ret = i40e_vsi_get_bw_info(vsi);
4653 if (ret) {
4654 dev_info(&vsi->back->pdev->dev,
4655 "Failed updating vsi bw info, err %s aq_err %s\n",
4656 i40e_stat_str(&vsi->back->hw, ret),
4657 i40e_aq_str(&vsi->back->hw,
4658 vsi->back->hw.aq.asq_last_status));
4659 goto out;
4660 }
4661
4662 /* Update the netdev TC setup */
4663 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4664 out:
4665 return ret;
4666 }
4667
4668 /**
4669 * i40e_veb_config_tc - Configure TCs for given VEB
4670 * @veb: given VEB
4671 * @enabled_tc: TC bitmap
4672 *
4673 * Configures given TC bitmap for VEB (switching) element
4674 **/
4675 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4676 {
4677 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4678 struct i40e_pf *pf = veb->pf;
4679 int ret = 0;
4680 int i;
4681
4682 /* No TCs or already enabled TCs just return */
4683 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4684 return ret;
4685
4686 bw_data.tc_valid_bits = enabled_tc;
4687 /* bw_data.absolute_credits is not set (relative) */
4688
4689 /* Enable ETS TCs with equal BW Share for now */
4690 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4691 if (enabled_tc & BIT_ULL(i))
4692 bw_data.tc_bw_share_credits[i] = 1;
4693 }
4694
4695 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4696 &bw_data, NULL);
4697 if (ret) {
4698 dev_info(&pf->pdev->dev,
4699 "VEB bw config failed, err %s aq_err %s\n",
4700 i40e_stat_str(&pf->hw, ret),
4701 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4702 goto out;
4703 }
4704
4705 /* Update the BW information */
4706 ret = i40e_veb_get_bw_info(veb);
4707 if (ret) {
4708 dev_info(&pf->pdev->dev,
4709 "Failed getting veb bw config, err %s aq_err %s\n",
4710 i40e_stat_str(&pf->hw, ret),
4711 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4712 }
4713
4714 out:
4715 return ret;
4716 }
4717
4718 #ifdef CONFIG_I40E_DCB
4719 /**
4720 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4721 * @pf: PF struct
4722 *
4723 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4724 * the caller has quiesced all the VSIs before calling
4725 * this function
4726 **/
4727 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4728 {
4729 u8 tc_map = 0;
4730 int ret;
4731 u8 v;
4732
4733 /* Enable the TCs available on PF to all VEBs */
4734 tc_map = i40e_pf_get_tc_map(pf);
4735 for (v = 0; v < I40E_MAX_VEB; v++) {
4736 if (!pf->veb[v])
4737 continue;
4738 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4739 if (ret) {
4740 dev_info(&pf->pdev->dev,
4741 "Failed configuring TC for VEB seid=%d\n",
4742 pf->veb[v]->seid);
4743 /* Will try to configure as many components as possible */
4744 }
4745 }
4746
4747 /* Update each VSI */
4748 for (v = 0; v < pf->num_alloc_vsi; v++) {
4749 if (!pf->vsi[v])
4750 continue;
4751
4752 /* - Enable all TCs for the LAN VSI
4753 #ifdef I40E_FCOE
4754 * - For FCoE VSI only enable the TC configured
4755 * as per the APP TLV
4756 #endif
4757 * - For all others keep them at TC0 for now
4758 */
4759 if (v == pf->lan_vsi)
4760 tc_map = i40e_pf_get_tc_map(pf);
4761 else
4762 tc_map = i40e_pf_get_default_tc(pf);
4763 #ifdef I40E_FCOE
4764 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4765 tc_map = i40e_get_fcoe_tc_map(pf);
4766 #endif /* #ifdef I40E_FCOE */
4767
4768 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4769 if (ret) {
4770 dev_info(&pf->pdev->dev,
4771 "Failed configuring TC for VSI seid=%d\n",
4772 pf->vsi[v]->seid);
4773 /* Will try to configure as many components as possible */
4774 } else {
4775 /* Re-configure VSI vectors based on updated TC map */
4776 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4777 if (pf->vsi[v]->netdev)
4778 i40e_dcbnl_set_all(pf->vsi[v]);
4779 }
4780 }
4781 }
4782
4783 /**
4784 * i40e_resume_port_tx - Resume port Tx
4785 * @pf: PF struct
4786 *
4787 * Resume a port's Tx and issue a PF reset in case of failure to
4788 * resume.
4789 **/
4790 static int i40e_resume_port_tx(struct i40e_pf *pf)
4791 {
4792 struct i40e_hw *hw = &pf->hw;
4793 int ret;
4794
4795 ret = i40e_aq_resume_port_tx(hw, NULL);
4796 if (ret) {
4797 dev_info(&pf->pdev->dev,
4798 "Resume Port Tx failed, err %s aq_err %s\n",
4799 i40e_stat_str(&pf->hw, ret),
4800 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4801 /* Schedule PF reset to recover */
4802 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4803 i40e_service_event_schedule(pf);
4804 }
4805
4806 return ret;
4807 }
4808
4809 /**
4810 * i40e_init_pf_dcb - Initialize DCB configuration
4811 * @pf: PF being configured
4812 *
4813 * Query the current DCB configuration and cache it
4814 * in the hardware structure
4815 **/
4816 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4817 {
4818 struct i40e_hw *hw = &pf->hw;
4819 int err = 0;
4820
4821 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
4822 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4823 (pf->hw.aq.fw_maj_ver < 4))
4824 goto out;
4825
4826 /* Get the initial DCB configuration */
4827 err = i40e_init_dcb(hw);
4828 if (!err) {
4829 /* Device/Function is not DCBX capable */
4830 if ((!hw->func_caps.dcb) ||
4831 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4832 dev_info(&pf->pdev->dev,
4833 "DCBX offload is not supported or is disabled for this PF.\n");
4834
4835 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4836 goto out;
4837
4838 } else {
4839 /* When status is not DISABLED then DCBX is running in FW */
4840 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4841 DCB_CAP_DCBX_VER_IEEE;
4842
4843 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4844 /* Enable DCB tagging only when more than one TC */
4845 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4846 pf->flags |= I40E_FLAG_DCB_ENABLED;
4847 dev_dbg(&pf->pdev->dev,
4848 "DCBX offload is supported for this PF.\n");
4849 }
4850 } else {
4851 dev_info(&pf->pdev->dev,
4852 "Query for DCB configuration failed, err %s aq_err %s\n",
4853 i40e_stat_str(&pf->hw, err),
4854 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4855 }
4856
4857 out:
4858 return err;
4859 }
4860 #endif /* CONFIG_I40E_DCB */
4861 #define SPEED_SIZE 14
4862 #define FC_SIZE 8
4863 /**
4864 * i40e_print_link_message - print link up or down
4865 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
4866 **/
4867 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4868 {
4869 char speed[SPEED_SIZE] = "Unknown";
4870 char fc[FC_SIZE] = "RX/TX";
4871
4872 if (!isup) {
4873 netdev_info(vsi->netdev, "NIC Link is Down\n");
4874 return;
4875 }
4876
4877 /* Warn user if link speed on NPAR enabled partition is not at
4878 * least 10GB
4879 */
4880 if (vsi->back->hw.func_caps.npar_enable &&
4881 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4882 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4883 netdev_warn(vsi->netdev,
4884 "The partition detected link speed that is less than 10Gbps\n");
4885
4886 switch (vsi->back->hw.phy.link_info.link_speed) {
4887 case I40E_LINK_SPEED_40GB:
4888 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4889 break;
4890 case I40E_LINK_SPEED_20GB:
4891 strlcpy(speed, "20 Gbps", SPEED_SIZE);
4892 break;
4893 case I40E_LINK_SPEED_10GB:
4894 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4895 break;
4896 case I40E_LINK_SPEED_1GB:
4897 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4898 break;
4899 case I40E_LINK_SPEED_100MB:
4900 strlcpy(speed, "100 Mbps", SPEED_SIZE);
4901 break;
4902 default:
4903 break;
4904 }
4905
4906 switch (vsi->back->hw.fc.current_mode) {
4907 case I40E_FC_FULL:
4908 strlcpy(fc, "RX/TX", FC_SIZE);
4909 break;
4910 case I40E_FC_TX_PAUSE:
4911 strlcpy(fc, "TX", FC_SIZE);
4912 break;
4913 case I40E_FC_RX_PAUSE:
4914 strlcpy(fc, "RX", FC_SIZE);
4915 break;
4916 default:
4917 strlcpy(fc, "None", FC_SIZE);
4918 break;
4919 }
4920
4921 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4922 speed, fc);
4923 }
4924
4925 /**
4926 * i40e_up_complete - Finish the last steps of bringing up a connection
4927 * @vsi: the VSI being configured
4928 **/
4929 static int i40e_up_complete(struct i40e_vsi *vsi)
4930 {
4931 struct i40e_pf *pf = vsi->back;
4932 int err;
4933
4934 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4935 i40e_vsi_configure_msix(vsi);
4936 else
4937 i40e_configure_msi_and_legacy(vsi);
4938
4939 /* start rings */
4940 err = i40e_vsi_control_rings(vsi, true);
4941 if (err)
4942 return err;
4943
4944 clear_bit(__I40E_DOWN, &vsi->state);
4945 i40e_napi_enable_all(vsi);
4946 i40e_vsi_enable_irq(vsi);
4947
4948 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4949 (vsi->netdev)) {
4950 i40e_print_link_message(vsi, true);
4951 netif_tx_start_all_queues(vsi->netdev);
4952 netif_carrier_on(vsi->netdev);
4953 } else if (vsi->netdev) {
4954 i40e_print_link_message(vsi, false);
4955 /* need to check for a qualified module here */
4956 if ((pf->hw.phy.link_info.link_info &
4957 I40E_AQ_MEDIA_AVAILABLE) &&
4958 (!(pf->hw.phy.link_info.an_info &
4959 I40E_AQ_QUALIFIED_MODULE)))
4960 netdev_err(vsi->netdev,
4961 "the driver failed to link because an unqualified module was detected.");
4962 }
4963
4964 /* replay FDIR SB filters */
4965 if (vsi->type == I40E_VSI_FDIR) {
4966 /* reset fd counters */
4967 pf->fd_add_err = pf->fd_atr_cnt = 0;
4968 if (pf->fd_tcp_rule > 0) {
4969 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4970 if (I40E_DEBUG_FD & pf->hw.debug_mask)
4971 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4972 pf->fd_tcp_rule = 0;
4973 }
4974 i40e_fdir_filter_restore(vsi);
4975 }
4976 i40e_service_event_schedule(pf);
4977
4978 return 0;
4979 }
4980
4981 /**
4982 * i40e_vsi_reinit_locked - Reset the VSI
4983 * @vsi: the VSI being configured
4984 *
4985 * Rebuild the ring structs after some configuration
4986 * has changed, e.g. MTU size.
4987 **/
4988 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4989 {
4990 struct i40e_pf *pf = vsi->back;
4991
4992 WARN_ON(in_interrupt());
4993 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4994 usleep_range(1000, 2000);
4995 i40e_down(vsi);
4996
4997 /* Give a VF some time to respond to the reset. The
4998 * two second wait is based upon the watchdog cycle in
4999 * the VF driver.
5000 */
5001 if (vsi->type == I40E_VSI_SRIOV)
5002 msleep(2000);
5003 i40e_up(vsi);
5004 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5005 }
5006
5007 /**
5008 * i40e_up - Bring the connection back up after being down
5009 * @vsi: the VSI being configured
5010 **/
5011 int i40e_up(struct i40e_vsi *vsi)
5012 {
5013 int err;
5014
5015 err = i40e_vsi_configure(vsi);
5016 if (!err)
5017 err = i40e_up_complete(vsi);
5018
5019 return err;
5020 }
5021
5022 /**
5023 * i40e_down - Shutdown the connection processing
5024 * @vsi: the VSI being stopped
5025 **/
5026 void i40e_down(struct i40e_vsi *vsi)
5027 {
5028 int i;
5029
5030 /* It is assumed that the caller of this function
5031 * sets the vsi->state __I40E_DOWN bit.
5032 */
5033 if (vsi->netdev) {
5034 netif_carrier_off(vsi->netdev);
5035 netif_tx_disable(vsi->netdev);
5036 }
5037 i40e_vsi_disable_irq(vsi);
5038 i40e_vsi_control_rings(vsi, false);
5039 i40e_napi_disable_all(vsi);
5040
5041 for (i = 0; i < vsi->num_queue_pairs; i++) {
5042 i40e_clean_tx_ring(vsi->tx_rings[i]);
5043 i40e_clean_rx_ring(vsi->rx_rings[i]);
5044 }
5045 }
5046
5047 /**
5048 * i40e_setup_tc - configure multiple traffic classes
5049 * @netdev: net device to configure
5050 * @tc: number of traffic classes to enable
5051 **/
5052 #ifdef I40E_FCOE
5053 int i40e_setup_tc(struct net_device *netdev, u8 tc)
5054 #else
5055 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5056 #endif
5057 {
5058 struct i40e_netdev_priv *np = netdev_priv(netdev);
5059 struct i40e_vsi *vsi = np->vsi;
5060 struct i40e_pf *pf = vsi->back;
5061 u8 enabled_tc = 0;
5062 int ret = -EINVAL;
5063 int i;
5064
5065 /* Check if DCB enabled to continue */
5066 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5067 netdev_info(netdev, "DCB is not enabled for adapter\n");
5068 goto exit;
5069 }
5070
5071 /* Check if MFP enabled */
5072 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5073 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5074 goto exit;
5075 }
5076
5077 /* Check whether tc count is within enabled limit */
5078 if (tc > i40e_pf_get_num_tc(pf)) {
5079 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5080 goto exit;
5081 }
5082
5083 /* Generate TC map for number of tc requested */
5084 for (i = 0; i < tc; i++)
5085 enabled_tc |= BIT_ULL(i);
5086
5087 /* Requesting same TC configuration as already enabled */
5088 if (enabled_tc == vsi->tc_config.enabled_tc)
5089 return 0;
5090
5091 /* Quiesce VSI queues */
5092 i40e_quiesce_vsi(vsi);
5093
5094 /* Configure VSI for enabled TCs */
5095 ret = i40e_vsi_config_tc(vsi, enabled_tc);
5096 if (ret) {
5097 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5098 vsi->seid);
5099 goto exit;
5100 }
5101
5102 /* Unquiesce VSI */
5103 i40e_unquiesce_vsi(vsi);
5104
5105 exit:
5106 return ret;
5107 }
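
/* Note (illustrative): this handler is reached through the netdev's
 * ndo_setup_tc hook, typically when userspace configures mqprio, e.g.
 * "tc qdisc add dev <ifname> root mqprio num_tc 4"; the exact command
 * is only an example and not part of this driver.
 */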
5108
5109 /**
5110 * i40e_open - Called when a network interface is made active
5111 * @netdev: network interface device structure
5112 *
5113 * The open entry point is called when a network interface is made
5114 * active by the system (IFF_UP). At this point all resources needed
5115 * for transmit and receive operations are allocated, the interrupt
5116 * handler is registered with the OS, the netdev watchdog subtask is
5117 * enabled, and the stack is notified that the interface is ready.
5118 *
5119 * Returns 0 on success, negative value on failure
5120 **/
5121 int i40e_open(struct net_device *netdev)
5122 {
5123 struct i40e_netdev_priv *np = netdev_priv(netdev);
5124 struct i40e_vsi *vsi = np->vsi;
5125 struct i40e_pf *pf = vsi->back;
5126 int err;
5127
5128 /* disallow open during test or if eeprom is broken */
5129 if (test_bit(__I40E_TESTING, &pf->state) ||
5130 test_bit(__I40E_BAD_EEPROM, &pf->state))
5131 return -EBUSY;
5132
5133 netif_carrier_off(netdev);
5134
5135 err = i40e_vsi_open(vsi);
5136 if (err)
5137 return err;
5138
5139 /* configure global TSO hardware offload settings */
5140 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
5141 TCP_FLAG_FIN) >> 16);
5142 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
5143 TCP_FLAG_FIN |
5144 TCP_FLAG_CWR) >> 16);
5145 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
5146
5147 #ifdef CONFIG_I40E_VXLAN
5148 vxlan_get_rx_port(netdev);
5149 #endif
5150
5151 return 0;
5152 }
5153
5154 /**
5155 * i40e_vsi_open - Finish initialization and bring the VSI up
5156 * @vsi: the VSI to open
5157 *
5158 * Finish initialization of the VSI.
5159 *
5160 * Returns 0 on success, negative value on failure
5161 **/
5162 int i40e_vsi_open(struct i40e_vsi *vsi)
5163 {
5164 struct i40e_pf *pf = vsi->back;
5165 char int_name[I40E_INT_NAME_STR_LEN];
5166 int err;
5167
5168 /* allocate descriptors */
5169 err = i40e_vsi_setup_tx_resources(vsi);
5170 if (err)
5171 goto err_setup_tx;
5172 err = i40e_vsi_setup_rx_resources(vsi);
5173 if (err)
5174 goto err_setup_rx;
5175
5176 err = i40e_vsi_configure(vsi);
5177 if (err)
5178 goto err_setup_rx;
5179
5180 if (vsi->netdev) {
5181 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5182 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
5183 err = i40e_vsi_request_irq(vsi, int_name);
5184 if (err)
5185 goto err_setup_rx;
5186
5187 /* Notify the stack of the actual queue counts. */
5188 err = netif_set_real_num_tx_queues(vsi->netdev,
5189 vsi->num_queue_pairs);
5190 if (err)
5191 goto err_set_queues;
5192
5193 err = netif_set_real_num_rx_queues(vsi->netdev,
5194 vsi->num_queue_pairs);
5195 if (err)
5196 goto err_set_queues;
5197
5198 } else if (vsi->type == I40E_VSI_FDIR) {
5199 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
5200 dev_driver_string(&pf->pdev->dev),
5201 dev_name(&pf->pdev->dev));
5202 err = i40e_vsi_request_irq(vsi, int_name);
5203
5204 } else {
5205 err = -EINVAL;
5206 goto err_setup_rx;
5207 }
5208
5209 err = i40e_up_complete(vsi);
5210 if (err)
5211 goto err_up_complete;
5212
5213 return 0;
5214
5215 err_up_complete:
5216 i40e_down(vsi);
5217 err_set_queues:
5218 i40e_vsi_free_irq(vsi);
5219 err_setup_rx:
5220 i40e_vsi_free_rx_resources(vsi);
5221 err_setup_tx:
5222 i40e_vsi_free_tx_resources(vsi);
5223 if (vsi == pf->vsi[pf->lan_vsi])
5224 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
5225
5226 return err;
5227 }
5228
5229 /**
5230 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5231 * @pf: Pointer to PF
5232 *
5233 * This function destroys the hlist where all the Flow Director
5234 * filters were saved.
5235 **/
5236 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5237 {
5238 struct i40e_fdir_filter *filter;
5239 struct hlist_node *node2;
5240
5241 hlist_for_each_entry_safe(filter, node2,
5242 &pf->fdir_filter_list, fdir_node) {
5243 hlist_del(&filter->fdir_node);
5244 kfree(filter);
5245 }
5246 pf->fdir_pf_active_filters = 0;
5247 }
5248
5249 /**
5250 * i40e_close - Disables a network interface
5251 * @netdev: network interface device structure
5252 *
5253 * The close entry point is called when an interface is de-activated
5254 * by the OS. The hardware is still under the driver's control, but
5255 * this netdev interface is disabled.
5256 *
5257 * Returns 0, this is not allowed to fail
5258 **/
5259 #ifdef I40E_FCOE
5260 int i40e_close(struct net_device *netdev)
5261 #else
5262 static int i40e_close(struct net_device *netdev)
5263 #endif
5264 {
5265 struct i40e_netdev_priv *np = netdev_priv(netdev);
5266 struct i40e_vsi *vsi = np->vsi;
5267
5268 i40e_vsi_close(vsi);
5269
5270 return 0;
5271 }
5272
5273 /**
5274 * i40e_do_reset - Start a PF or Core Reset sequence
5275 * @pf: board private structure
5276 * @reset_flags: which reset is requested
5277 *
5278 * The essential difference in resets is that the PF Reset
5279 * doesn't clear the packet buffers, doesn't reset the PE
5280 * firmware, and doesn't bother the other PFs on the chip.
5281 **/
5282 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5283 {
5284 u32 val;
5285
5286 WARN_ON(in_interrupt());
5287
5288 if (i40e_check_asq_alive(&pf->hw))
5289 i40e_vc_notify_reset(pf);
5290
5291 /* do the biggest reset indicated */
5292 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
5293
5294 /* Request a Global Reset
5295 *
5296 * This will start the chip's countdown to the actual full
5297 * chip reset event, and a warning interrupt to be sent
5298 * to all PFs, including the requestor. Our handler
5299 * for the warning interrupt will deal with the shutdown
5300 * and recovery of the switch setup.
5301 */
5302 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5303 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5304 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5305 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5306
5307 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
5308
5309 /* Request a Core Reset
5310 *
5311 * Same as Global Reset, except does *not* include the MAC/PHY
5312 */
5313 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5314 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5315 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5316 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5317 i40e_flush(&pf->hw);
5318
5319 } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
5320
5321 /* Request a PF Reset
5322 *
5323 * Resets only the PF-specific registers
5324 *
5325 * This goes directly to the tear-down and rebuild of
5326 * the switch, since we need to do all the recovery as
5327 * for the Core Reset.
5328 */
5329 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5330 i40e_handle_reset_warning(pf);
5331
5332 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
5333 int v;
5334
5335 /* Find the VSI(s) that requested a re-init */
5336 dev_info(&pf->pdev->dev,
5337 "VSI reinit requested\n");
5338 for (v = 0; v < pf->num_alloc_vsi; v++) {
5339 struct i40e_vsi *vsi = pf->vsi[v];
5340 if (vsi != NULL &&
5341 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5342 i40e_vsi_reinit_locked(pf->vsi[v]);
5343 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5344 }
5345 }
5346
5347 /* no further action needed, so return now */
5348 return;
5349 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
5350 int v;
5351
5352 /* Find the VSI(s) that needs to be brought down */
5353 dev_info(&pf->pdev->dev, "VSI down requested\n");
5354 for (v = 0; v < pf->num_alloc_vsi; v++) {
5355 struct i40e_vsi *vsi = pf->vsi[v];
5356 if (vsi != NULL &&
5357 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5358 set_bit(__I40E_DOWN, &vsi->state);
5359 i40e_down(vsi);
5360 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5361 }
5362 }
5363
5364 /* no further action needed, so return now */
5365 return;
5366 } else {
5367 dev_info(&pf->pdev->dev,
5368 "bad reset request 0x%08x\n", reset_flags);
5369 return;
5370 }
5371 }
5372
5373 #ifdef CONFIG_I40E_DCB
5374 /**
5375 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5376 * @pf: board private structure
5377 * @old_cfg: current DCB config
5378 * @new_cfg: new DCB config
5379 **/
5380 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5381 struct i40e_dcbx_config *old_cfg,
5382 struct i40e_dcbx_config *new_cfg)
5383 {
5384 bool need_reconfig = false;
5385
5386 /* Check if ETS configuration has changed */
5387 if (memcmp(&new_cfg->etscfg,
5388 &old_cfg->etscfg,
5389 sizeof(new_cfg->etscfg))) {
5390 /* If Priority Table has changed reconfig is needed */
5391 if (memcmp(&new_cfg->etscfg.prioritytable,
5392 &old_cfg->etscfg.prioritytable,
5393 sizeof(new_cfg->etscfg.prioritytable))) {
5394 need_reconfig = true;
5395 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5396 }
5397
5398 if (memcmp(&new_cfg->etscfg.tcbwtable,
5399 &old_cfg->etscfg.tcbwtable,
5400 sizeof(new_cfg->etscfg.tcbwtable)))
5401 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5402
5403 if (memcmp(&new_cfg->etscfg.tsatable,
5404 &old_cfg->etscfg.tsatable,
5405 sizeof(new_cfg->etscfg.tsatable)))
5406 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5407 }
5408
5409 /* Check if PFC configuration has changed */
5410 if (memcmp(&new_cfg->pfc,
5411 &old_cfg->pfc,
5412 sizeof(new_cfg->pfc))) {
5413 need_reconfig = true;
5414 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5415 }
5416
5417 /* Check if APP Table has changed */
5418 if (memcmp(&new_cfg->app,
5419 &old_cfg->app,
5420 sizeof(new_cfg->app))) {
5421 need_reconfig = true;
5422 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5423 }
5424
5425 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5426 need_reconfig);
5427 return need_reconfig;
5428 }
5429
5430 /**
5431 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5432 * @pf: board private structure
5433 * @e: event info posted on ARQ
5434 **/
5435 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5436 struct i40e_arq_event_info *e)
5437 {
5438 struct i40e_aqc_lldp_get_mib *mib =
5439 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5440 struct i40e_hw *hw = &pf->hw;
5441 struct i40e_dcbx_config tmp_dcbx_cfg;
5442 bool need_reconfig = false;
5443 int ret = 0;
5444 u8 type;
5445
5446 /* Not DCB capable or capability disabled */
5447 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5448 return ret;
5449
5450 /* Ignore if event is not for Nearest Bridge */
5451 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5452 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5453 dev_dbg(&pf->pdev->dev,
5454 "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
5455 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5456 return ret;
5457
5458 /* Check MIB Type and return if event for Remote MIB update */
5459 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5460 dev_dbg(&pf->pdev->dev,
5461 "%s: LLDP event mib type %s\n", __func__,
5462 type ? "remote" : "local");
5463 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5464 /* Update the remote cached instance and return */
5465 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5466 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5467 &hw->remote_dcbx_config);
5468 goto exit;
5469 }
5470
5471 /* Store the old configuration */
5472 tmp_dcbx_cfg = hw->local_dcbx_config;
5473
5474 /* Reset the old DCBx configuration data */
5475 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5476 /* Get updated DCBX data from firmware */
5477 ret = i40e_get_dcb_config(&pf->hw);
5478 if (ret) {
5479 dev_info(&pf->pdev->dev,
5480 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
5481 i40e_stat_str(&pf->hw, ret),
5482 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5483 goto exit;
5484 }
5485
5486 /* No change detected in DCBX configs */
5487 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5488 sizeof(tmp_dcbx_cfg))) {
5489 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5490 goto exit;
5491 }
5492
5493 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5494 &hw->local_dcbx_config);
5495
5496 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5497
5498 if (!need_reconfig)
5499 goto exit;
5500
5501 /* Enable DCB tagging only when more than one TC */
5502 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5503 pf->flags |= I40E_FLAG_DCB_ENABLED;
5504 else
5505 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5506
5507 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5508 /* Reconfiguration needed; quiesce all VSIs */
5509 i40e_pf_quiesce_all_vsi(pf);
5510
5511 /* Changes in configuration update VEB/VSI */
5512 i40e_dcb_reconfigure(pf);
5513
5514 ret = i40e_resume_port_tx(pf);
5515
5516 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5517 /* In case of error no point in resuming VSIs */
5518 if (ret)
5519 goto exit;
5520
5521 /* Wait for the PF's Tx queues to be disabled */
5522 ret = i40e_pf_wait_txq_disabled(pf);
5523 if (ret) {
5524 /* Schedule PF reset to recover */
5525 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5526 i40e_service_event_schedule(pf);
5527 } else {
5528 i40e_pf_unquiesce_all_vsi(pf);
5529 }
5530
5531 exit:
5532 return ret;
5533 }
5534 #endif /* CONFIG_I40E_DCB */
5535
5536 /**
5537 * i40e_do_reset_safe - Protected reset path for userland calls.
5538 * @pf: board private structure
5539 * @reset_flags: which reset is requested
5540 *
5541 **/
5542 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5543 {
5544 rtnl_lock();
5545 i40e_do_reset(pf, reset_flags);
5546 rtnl_unlock();
5547 }
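
/* Usage sketch (illustrative only): request a PF reset from a context
 * that does not already hold the rtnl lock:
 *
 *	i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 */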
5548
5549 /**
5550 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5551 * @pf: board private structure
5552 * @e: event info posted on ARQ
5553 *
5554 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5555 * and VF queues
5556 **/
5557 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5558 struct i40e_arq_event_info *e)
5559 {
5560 struct i40e_aqc_lan_overflow *data =
5561 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5562 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5563 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5564 struct i40e_hw *hw = &pf->hw;
5565 struct i40e_vf *vf;
5566 u16 vf_id;
5567
5568 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5569 queue, qtx_ctl);
5570
5571 /* Queue belongs to VF, find the VF and issue VF reset */
5572 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5573 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5574 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5575 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5576 vf_id -= hw->func_caps.vf_base_id;
5577 vf = &pf->vf[vf_id];
5578 i40e_vc_notify_vf_reset(vf);
5579 /* Allow VF to process pending reset notification */
5580 msleep(20);
5581 i40e_reset_vf(vf, false);
5582 }
5583 }
5584
5585 /**
5586 * i40e_service_event_complete - Finish up the service event
5587 * @pf: board private structure
5588 **/
5589 static void i40e_service_event_complete(struct i40e_pf *pf)
5590 {
5591 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5592
5593 	/* flush memory to make sure state is correct before next watchdog */
5594 smp_mb__before_atomic();
5595 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5596 }
5597
5598 /**
5599 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5600 * @pf: board private structure
5601 **/
5602 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5603 {
5604 u32 val, fcnt_prog;
5605
5606 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5607 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5608 return fcnt_prog;
5609 }
5610
5611 /**
5612 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5613 * @pf: board private structure
5614 **/
5615 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5616 {
5617 u32 val, fcnt_prog;
5618
5619 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5620 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5621 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5622 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5623 return fcnt_prog;
5624 }
5625
5626 /**
5627 * i40e_get_global_fd_count - Get total FD filters programmed on device
5628 * @pf: board private structure
5629 **/
5630 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5631 {
5632 u32 val, fcnt_prog;
5633
5634 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5635 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5636 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5637 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5638 return fcnt_prog;
5639 }
5640
5641 /**
5642  * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5643 * @pf: board private structure
5644 **/
5645 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5646 {
5647 u32 fcnt_prog, fcnt_avail;
5648
5649 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5650 return;
5651
5652 	/* Check if FD SB or ATR was auto-disabled and if there is enough room
5653 * to re-enable
5654 */
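	/* Hypothetical illustration (both numbers assumed, not taken from the
	 * headers): with fdir_pf_filter_count = 8192 and a headroom of 32, the
	 * sideband path can be re-enabled once fewer than 8160 filters are
	 * programmed globally (one of several conditions checked below), while
	 * ATR needs double the headroom, i.e. fewer than 8128.
	 */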
5655 fcnt_prog = i40e_get_global_fd_count(pf);
5656 fcnt_avail = pf->fdir_pf_filter_count;
5657 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5658 (pf->fd_add_err == 0) ||
5659 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5660 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5661 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5662 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5663 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5664 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5665 }
5666 }
5667 /* Wait for some more space to be available to turn on ATR */
5668 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5669 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5670 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5671 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5672 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5673 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5674 }
5675 }
5676 }
5677
5678 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5679 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
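/* Both intervals are expressed in seconds; they are converted to jiffies by
 * multiplying with HZ inside i40e_fdir_flush_and_replay() below.
 */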
5680 /**
5681 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5682 * @pf: board private structure
5683 **/
5684 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5685 {
5686 unsigned long min_flush_time;
5687 int flush_wait_retry = 50;
5688 bool disable_atr = false;
5689 int fd_room;
5690 int reg;
5691
5692 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5693 return;
5694
5695 if (time_after(jiffies, pf->fd_flush_timestamp +
5696 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5697 		/* If the flush is happening too quickly and we have mostly
5698 		 * SB rules, we should not re-enable ATR for some time.
5699 */
5700 min_flush_time = pf->fd_flush_timestamp
5701 + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5702 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5703
5704 if (!(time_after(jiffies, min_flush_time)) &&
5705 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5706 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5707 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5708 disable_atr = true;
5709 }
5710
5711 pf->fd_flush_timestamp = jiffies;
5712 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5713 /* flush all filters */
5714 wr32(&pf->hw, I40E_PFQF_CTL_1,
5715 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5716 i40e_flush(&pf->hw);
5717 pf->fd_flush_cnt++;
5718 pf->fd_add_err = 0;
5719 do {
5720 /* Check FD flush status every 5-6msec */
5721 usleep_range(5000, 6000);
5722 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5723 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5724 break;
5725 } while (flush_wait_retry--);
5726 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5727 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5728 } else {
5729 /* replay sideband filters */
5730 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5731 if (!disable_atr)
5732 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5733 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5734 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5735 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5736 }
5737 }
5738 }
5739
5740 /**
5741  * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5742 * @pf: board private structure
5743 **/
5744 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5745 {
5746 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5747 }
5748
5749 /* We can see up to 256 filter programming descriptors in transit if the
5750  * filters are being applied really fast, before we see the first filter
5751  * miss error on Rx queue 0. Accumulating enough error messages before
5752  * reacting makes sure we don't trigger a flush too often.
5753 */
5754 #define I40E_MAX_FD_PROGRAM_ERROR 256
5755
5756 /**
5757 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5758 * @pf: board private structure
5759 **/
5760 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5761 {
5762
5763 /* if interface is down do nothing */
5764 if (test_bit(__I40E_DOWN, &pf->state))
5765 return;
5766
5767 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5768 return;
5769
5770 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5771 i40e_fdir_flush_and_replay(pf);
5772
5773 i40e_fdir_check_and_reenable(pf);
5774
5775 }
5776
5777 /**
5778 * i40e_vsi_link_event - notify VSI of a link event
5779 * @vsi: vsi to be notified
5780 * @link_up: link up or down
5781 **/
5782 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5783 {
5784 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5785 return;
5786
5787 switch (vsi->type) {
5788 case I40E_VSI_MAIN:
5789 #ifdef I40E_FCOE
5790 case I40E_VSI_FCOE:
5791 #endif
5792 if (!vsi->netdev || !vsi->netdev_registered)
5793 break;
5794
5795 if (link_up) {
5796 netif_carrier_on(vsi->netdev);
5797 netif_tx_wake_all_queues(vsi->netdev);
5798 } else {
5799 netif_carrier_off(vsi->netdev);
5800 netif_tx_stop_all_queues(vsi->netdev);
5801 }
5802 break;
5803
5804 case I40E_VSI_SRIOV:
5805 case I40E_VSI_VMDQ2:
5806 case I40E_VSI_CTRL:
5807 case I40E_VSI_MIRROR:
5808 default:
5809 /* there is no notification for other VSIs */
5810 break;
5811 }
5812 }
5813
5814 /**
5815 * i40e_veb_link_event - notify elements on the veb of a link event
5816 * @veb: veb to be notified
5817 * @link_up: link up or down
5818 **/
5819 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5820 {
5821 struct i40e_pf *pf;
5822 int i;
5823
5824 if (!veb || !veb->pf)
5825 return;
5826 pf = veb->pf;
5827
5828 /* depth first... */
5829 for (i = 0; i < I40E_MAX_VEB; i++)
5830 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5831 i40e_veb_link_event(pf->veb[i], link_up);
5832
5833 /* ... now the local VSIs */
5834 for (i = 0; i < pf->num_alloc_vsi; i++)
5835 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5836 i40e_vsi_link_event(pf->vsi[i], link_up);
5837 }
5838
5839 /**
5840 * i40e_link_event - Update netif_carrier status
5841 * @pf: board private structure
5842 **/
5843 static void i40e_link_event(struct i40e_pf *pf)
5844 {
5845 bool new_link, old_link;
5846 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5847 u8 new_link_speed, old_link_speed;
5848
5849 /* set this to force the get_link_status call to refresh state */
5850 pf->hw.phy.get_link_info = true;
5851
5852 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5853 new_link = i40e_get_link_status(&pf->hw);
5854 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5855 new_link_speed = pf->hw.phy.link_info.link_speed;
5856
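	/* Nothing to do if neither the link state nor the link speed changed,
	 * provided the VSI is down or the netdev carrier state already matches
	 * the newly reported link state.
	 */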
5857 if (new_link == old_link &&
5858 new_link_speed == old_link_speed &&
5859 (test_bit(__I40E_DOWN, &vsi->state) ||
5860 new_link == netif_carrier_ok(vsi->netdev)))
5861 return;
5862
5863 if (!test_bit(__I40E_DOWN, &vsi->state))
5864 i40e_print_link_message(vsi, new_link);
5865
5866 /* Notify the base of the switch tree connected to
5867 * the link. Floating VEBs are not notified.
5868 */
5869 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5870 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5871 else
5872 i40e_vsi_link_event(vsi, new_link);
5873
5874 if (pf->vf)
5875 i40e_vc_notify_link_state(pf);
5876
5877 if (pf->flags & I40E_FLAG_PTP)
5878 i40e_ptp_set_increment(pf);
5879 }
5880
5881 /**
5882 * i40e_watchdog_subtask - periodic checks not using event driven response
5883 * @pf: board private structure
5884 **/
5885 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5886 {
5887 int i;
5888
5889 /* if interface is down do nothing */
5890 if (test_bit(__I40E_DOWN, &pf->state) ||
5891 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5892 return;
5893
5894 /* make sure we don't do these things too often */
5895 if (time_before(jiffies, (pf->service_timer_previous +
5896 pf->service_timer_period)))
5897 return;
5898 pf->service_timer_previous = jiffies;
5899
5900 i40e_link_event(pf);
5901
5902 /* Update the stats for active netdevs so the network stack
5903 * can look at updated numbers whenever it cares to
5904 */
5905 for (i = 0; i < pf->num_alloc_vsi; i++)
5906 if (pf->vsi[i] && pf->vsi[i]->netdev)
5907 i40e_update_stats(pf->vsi[i]);
5908
5909 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
5910 /* Update the stats for the active switching components */
5911 for (i = 0; i < I40E_MAX_VEB; i++)
5912 if (pf->veb[i])
5913 i40e_update_veb_stats(pf->veb[i]);
5914 }
5915
5916 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5917 }
5918
5919 /**
5920 * i40e_reset_subtask - Set up for resetting the device and driver
5921 * @pf: board private structure
5922 **/
5923 static void i40e_reset_subtask(struct i40e_pf *pf)
5924 {
5925 u32 reset_flags = 0;
5926
5927 rtnl_lock();
5928 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5929 reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
5930 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5931 }
5932 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5933 reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
5934 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5935 }
5936 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5937 reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
5938 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5939 }
5940 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5941 reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
5942 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5943 }
5944 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5945 reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
5946 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5947 }
5948
5949 	/* If there's a recovery already waiting, it takes
5950 	 * precedence over starting a new reset sequence.
5951 */
5952 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5953 i40e_handle_reset_warning(pf);
5954 goto unlock;
5955 }
5956
5957 /* If we're already down or resetting, just bail */
5958 if (reset_flags &&
5959 !test_bit(__I40E_DOWN, &pf->state) &&
5960 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5961 i40e_do_reset(pf, reset_flags);
5962
5963 unlock:
5964 rtnl_unlock();
5965 }
5966
5967 /**
5968 * i40e_handle_link_event - Handle link event
5969 * @pf: board private structure
5970 * @e: event info posted on ARQ
5971 **/
5972 static void i40e_handle_link_event(struct i40e_pf *pf,
5973 struct i40e_arq_event_info *e)
5974 {
5975 struct i40e_hw *hw = &pf->hw;
5976 struct i40e_aqc_get_link_status *status =
5977 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5978
5979 /* save off old link status information */
5980 hw->phy.link_info_old = hw->phy.link_info;
5981
5982 /* Do a new status request to re-enable LSE reporting
5983 * and load new status information into the hw struct
5984 * This completely ignores any state information
5985 * in the ARQ event info, instead choosing to always
5986 * issue the AQ update link status command.
5987 */
5988 i40e_link_event(pf);
5989
5990 /* check for unqualified module, if link is down */
5991 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5992 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5993 (!(status->link_info & I40E_AQ_LINK_UP)))
5994 dev_err(&pf->pdev->dev,
5995 "The driver failed to link because an unqualified module was detected.\n");
5996 }
5997
5998 /**
5999 * i40e_clean_adminq_subtask - Clean the AdminQ rings
6000 * @pf: board private structure
6001 **/
6002 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6003 {
6004 struct i40e_arq_event_info event;
6005 struct i40e_hw *hw = &pf->hw;
6006 u16 pending, i = 0;
6007 i40e_status ret;
6008 u16 opcode;
6009 u32 oldval;
6010 u32 val;
6011
6012 /* Do not run clean AQ when PF reset fails */
6013 if (test_bit(__I40E_RESET_FAILED, &pf->state))
6014 return;
6015
6016 /* check for error indications */
6017 val = rd32(&pf->hw, pf->hw.aq.arq.len);
6018 oldval = val;
6019 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6020 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6021 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6022 }
6023 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6024 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6025 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6026 }
6027 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6028 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6029 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6030 }
6031 if (oldval != val)
6032 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6033
6034 val = rd32(&pf->hw, pf->hw.aq.asq.len);
6035 oldval = val;
6036 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6037 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6038 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6039 }
6040 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6041 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6042 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6043 }
6044 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6045 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6046 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6047 }
6048 if (oldval != val)
6049 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6050
6051 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6052 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6053 if (!event.msg_buf)
6054 return;
6055
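	/* Drain the ARQ one element at a time, but cap the work done in a
	 * single pass at adminq_work_limit so the service task yields in a
	 * timely manner.
	 */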
6056 do {
6057 ret = i40e_clean_arq_element(hw, &event, &pending);
6058 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6059 break;
6060 else if (ret) {
6061 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6062 break;
6063 }
6064
6065 opcode = le16_to_cpu(event.desc.opcode);
6066 switch (opcode) {
6067
6068 case i40e_aqc_opc_get_link_status:
6069 i40e_handle_link_event(pf, &event);
6070 break;
6071 case i40e_aqc_opc_send_msg_to_pf:
6072 ret = i40e_vc_process_vf_msg(pf,
6073 le16_to_cpu(event.desc.retval),
6074 le32_to_cpu(event.desc.cookie_high),
6075 le32_to_cpu(event.desc.cookie_low),
6076 event.msg_buf,
6077 event.msg_len);
6078 break;
6079 case i40e_aqc_opc_lldp_update_mib:
6080 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6081 #ifdef CONFIG_I40E_DCB
6082 rtnl_lock();
6083 ret = i40e_handle_lldp_event(pf, &event);
6084 rtnl_unlock();
6085 #endif /* CONFIG_I40E_DCB */
6086 break;
6087 case i40e_aqc_opc_event_lan_overflow:
6088 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6089 i40e_handle_lan_overflow_event(pf, &event);
6090 break;
6091 case i40e_aqc_opc_send_msg_to_peer:
6092 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6093 break;
6094 case i40e_aqc_opc_nvm_erase:
6095 case i40e_aqc_opc_nvm_update:
6096 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
6097 break;
6098 default:
6099 dev_info(&pf->pdev->dev,
6100 "ARQ Error: Unknown event 0x%04x received\n",
6101 opcode);
6102 break;
6103 }
6104 } while (pending && (i++ < pf->adminq_work_limit));
6105
6106 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6107 /* re-enable Admin queue interrupt cause */
6108 val = rd32(hw, I40E_PFINT_ICR0_ENA);
6109 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6110 wr32(hw, I40E_PFINT_ICR0_ENA, val);
6111 i40e_flush(hw);
6112
6113 kfree(event.msg_buf);
6114 }
6115
6116 /**
6117 * i40e_verify_eeprom - make sure eeprom is good to use
6118 * @pf: board private structure
6119 **/
6120 static void i40e_verify_eeprom(struct i40e_pf *pf)
6121 {
6122 int err;
6123
6124 err = i40e_diag_eeprom_test(&pf->hw);
6125 if (err) {
6126 /* retry in case of garbage read */
6127 err = i40e_diag_eeprom_test(&pf->hw);
6128 if (err) {
6129 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6130 err);
6131 set_bit(__I40E_BAD_EEPROM, &pf->state);
6132 }
6133 }
6134
6135 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6136 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6137 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6138 }
6139 }
6140
6141 /**
6142 * i40e_enable_pf_switch_lb
6143 * @pf: pointer to the PF structure
6144 *
6145 * enable switch loop back or die - no point in a return value
6146 **/
6147 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6148 {
6149 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6150 struct i40e_vsi_context ctxt;
6151 int ret;
6152
6153 ctxt.seid = pf->main_vsi_seid;
6154 ctxt.pf_num = pf->hw.pf_id;
6155 ctxt.vf_num = 0;
6156 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6157 if (ret) {
6158 dev_info(&pf->pdev->dev,
6159 "couldn't get PF vsi config, err %s aq_err %s\n",
6160 i40e_stat_str(&pf->hw, ret),
6161 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6162 return;
6163 }
6164 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6165 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6166 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6167
6168 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6169 if (ret) {
6170 dev_info(&pf->pdev->dev,
6171 "update vsi switch failed, err %s aq_err %s\n",
6172 i40e_stat_str(&pf->hw, ret),
6173 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6174 }
6175 }
6176
6177 /**
6178 * i40e_disable_pf_switch_lb
6179 * @pf: pointer to the PF structure
6180 *
6181 * disable switch loop back or die - no point in a return value
6182 **/
6183 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6184 {
6185 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6186 struct i40e_vsi_context ctxt;
6187 int ret;
6188
6189 ctxt.seid = pf->main_vsi_seid;
6190 ctxt.pf_num = pf->hw.pf_id;
6191 ctxt.vf_num = 0;
6192 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6193 if (ret) {
6194 dev_info(&pf->pdev->dev,
6195 "couldn't get PF vsi config, err %s aq_err %s\n",
6196 i40e_stat_str(&pf->hw, ret),
6197 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6198 return;
6199 }
6200 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6201 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6202 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6203
6204 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6205 if (ret) {
6206 dev_info(&pf->pdev->dev,
6207 "update vsi switch failed, err %s aq_err %s\n",
6208 i40e_stat_str(&pf->hw, ret),
6209 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6210 }
6211 }
6212
6213 /**
6214 * i40e_config_bridge_mode - Configure the HW bridge mode
6215 * @veb: pointer to the bridge instance
6216 *
6217 * Configure the loop back mode for the LAN VSI that is downlink to the
6218 * specified HW bridge instance. It is expected this function is called
6219 * when a new HW bridge is instantiated.
6220 **/
6221 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6222 {
6223 struct i40e_pf *pf = veb->pf;
6224
6225 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6226 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
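	/* In VEPA mode, traffic between local VSIs is hairpinned through the
	 * adjacent external switch, so the PF's internal switch loopback is
	 * turned off; VEB mode keeps local switching (loopback) enabled.
	 */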
6227 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6228 i40e_disable_pf_switch_lb(pf);
6229 else
6230 i40e_enable_pf_switch_lb(pf);
6231 }
6232
6233 /**
6234 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6235 * @veb: pointer to the VEB instance
6236 *
6237 * This is a recursive function that first builds the attached VSIs then
6238  * recurses in to build the next layer of VEBs. We track the connections
6239  * through our own index numbers because the SEIDs from the HW could
6240 * change across the reset.
6241 **/
6242 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6243 {
6244 struct i40e_vsi *ctl_vsi = NULL;
6245 struct i40e_pf *pf = veb->pf;
6246 int v, veb_idx;
6247 int ret;
6248
6249 /* build VSI that owns this VEB, temporarily attached to base VEB */
6250 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6251 if (pf->vsi[v] &&
6252 pf->vsi[v]->veb_idx == veb->idx &&
6253 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6254 ctl_vsi = pf->vsi[v];
6255 break;
6256 }
6257 }
6258 if (!ctl_vsi) {
6259 dev_info(&pf->pdev->dev,
6260 "missing owner VSI for veb_idx %d\n", veb->idx);
6261 ret = -ENOENT;
6262 goto end_reconstitute;
6263 }
6264 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6265 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6266 ret = i40e_add_vsi(ctl_vsi);
6267 if (ret) {
6268 dev_info(&pf->pdev->dev,
6269 "rebuild of veb_idx %d owner VSI failed: %d\n",
6270 veb->idx, ret);
6271 goto end_reconstitute;
6272 }
6273 i40e_vsi_reset_stats(ctl_vsi);
6274
6275 /* create the VEB in the switch and move the VSI onto the VEB */
6276 ret = i40e_add_veb(veb, ctl_vsi);
6277 if (ret)
6278 goto end_reconstitute;
6279
6280 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6281 veb->bridge_mode = BRIDGE_MODE_VEB;
6282 else
6283 veb->bridge_mode = BRIDGE_MODE_VEPA;
6284 i40e_config_bridge_mode(veb);
6285
6286 /* create the remaining VSIs attached to this VEB */
6287 for (v = 0; v < pf->num_alloc_vsi; v++) {
6288 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6289 continue;
6290
6291 if (pf->vsi[v]->veb_idx == veb->idx) {
6292 struct i40e_vsi *vsi = pf->vsi[v];
6293 vsi->uplink_seid = veb->seid;
6294 ret = i40e_add_vsi(vsi);
6295 if (ret) {
6296 dev_info(&pf->pdev->dev,
6297 "rebuild of vsi_idx %d failed: %d\n",
6298 v, ret);
6299 goto end_reconstitute;
6300 }
6301 i40e_vsi_reset_stats(vsi);
6302 }
6303 }
6304
6305 /* create any VEBs attached to this VEB - RECURSION */
6306 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6307 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6308 pf->veb[veb_idx]->uplink_seid = veb->seid;
6309 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6310 if (ret)
6311 break;
6312 }
6313 }
6314
6315 end_reconstitute:
6316 return ret;
6317 }
6318
6319 /**
6320 * i40e_get_capabilities - get info about the HW
6321 * @pf: the PF struct
6322 **/
6323 static int i40e_get_capabilities(struct i40e_pf *pf)
6324 {
6325 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6326 u16 data_size;
6327 int buf_len;
6328 int err;
6329
6330 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6331 do {
6332 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6333 if (!cap_buf)
6334 return -ENOMEM;
6335
6336 /* this loads the data into the hw struct for us */
6337 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6338 &data_size,
6339 i40e_aqc_opc_list_func_capabilities,
6340 NULL);
6341 /* data loaded, buffer no longer needed */
6342 kfree(cap_buf);
6343
6344 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6345 /* retry with a larger buffer */
6346 buf_len = data_size;
6347 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6348 dev_info(&pf->pdev->dev,
6349 "capability discovery failed, err %s aq_err %s\n",
6350 i40e_stat_str(&pf->hw, err),
6351 i40e_aq_str(&pf->hw,
6352 pf->hw.aq.asq_last_status));
6353 return -ENODEV;
6354 }
6355 } while (err);
6356
6357 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6358 (pf->hw.aq.fw_maj_ver < 2)) {
6359 pf->hw.func_caps.num_msix_vectors++;
6360 pf->hw.func_caps.num_msix_vectors_vf++;
6361 }
6362
6363 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6364 dev_info(&pf->pdev->dev,
6365 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6366 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6367 pf->hw.func_caps.num_msix_vectors,
6368 pf->hw.func_caps.num_msix_vectors_vf,
6369 pf->hw.func_caps.fd_filters_guaranteed,
6370 pf->hw.func_caps.fd_filters_best_effort,
6371 pf->hw.func_caps.num_tx_qp,
6372 pf->hw.func_caps.num_vsis);
6373
6374 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6375 + pf->hw.func_caps.num_vfs)
6376 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6377 dev_info(&pf->pdev->dev,
6378 "got num_vsis %d, setting num_vsis to %d\n",
6379 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6380 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6381 }
6382
6383 return 0;
6384 }
6385
6386 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6387
6388 /**
6389 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6390 * @pf: board private structure
6391 **/
6392 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6393 {
6394 struct i40e_vsi *vsi;
6395 int i;
6396
6397 /* quick workaround for an NVM issue that leaves a critical register
6398 * uninitialized
6399 */
6400 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6401 static const u32 hkey[] = {
6402 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6403 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6404 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6405 0x95b3a76d};
6406
6407 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6408 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6409 }
6410
6411 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6412 return;
6413
6414 /* find existing VSI and see if it needs configuring */
6415 vsi = NULL;
6416 for (i = 0; i < pf->num_alloc_vsi; i++) {
6417 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6418 vsi = pf->vsi[i];
6419 break;
6420 }
6421 }
6422
6423 /* create a new VSI if none exists */
6424 if (!vsi) {
6425 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6426 pf->vsi[pf->lan_vsi]->seid, 0);
6427 if (!vsi) {
6428 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6429 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6430 return;
6431 }
6432 }
6433
6434 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6435 }
6436
6437 /**
6438 * i40e_fdir_teardown - release the Flow Director resources
6439 * @pf: board private structure
6440 **/
6441 static void i40e_fdir_teardown(struct i40e_pf *pf)
6442 {
6443 int i;
6444
6445 i40e_fdir_filter_exit(pf);
6446 for (i = 0; i < pf->num_alloc_vsi; i++) {
6447 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6448 i40e_vsi_release(pf->vsi[i]);
6449 break;
6450 }
6451 }
6452 }
6453
6454 /**
6455 * i40e_prep_for_reset - prep for the core to reset
6456 * @pf: board private structure
6457 *
6458 * Close up the VFs and other things in prep for PF Reset.
6459 **/
6460 static void i40e_prep_for_reset(struct i40e_pf *pf)
6461 {
6462 struct i40e_hw *hw = &pf->hw;
6463 i40e_status ret = 0;
6464 u32 v;
6465
6466 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6467 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6468 return;
6469
6470 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6471
6472 /* quiesce the VSIs and their queues that are not already DOWN */
6473 i40e_pf_quiesce_all_vsi(pf);
6474
6475 for (v = 0; v < pf->num_alloc_vsi; v++) {
6476 if (pf->vsi[v])
6477 pf->vsi[v]->seid = 0;
6478 }
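	/* Invalidate the cached SEIDs: the coming reset wipes the HW switch
	 * elements, and the rebuild path re-adds the VSIs and records fresh
	 * SEIDs for them.
	 */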
6479
6480 i40e_shutdown_adminq(&pf->hw);
6481
6482 /* call shutdown HMC */
6483 if (hw->hmc.hmc_obj) {
6484 ret = i40e_shutdown_lan_hmc(hw);
6485 if (ret)
6486 dev_warn(&pf->pdev->dev,
6487 "shutdown_lan_hmc failed: %d\n", ret);
6488 }
6489 }
6490
6491 /**
6492 * i40e_send_version - update firmware with driver version
6493 * @pf: PF struct
6494 */
6495 static void i40e_send_version(struct i40e_pf *pf)
6496 {
6497 struct i40e_driver_version dv;
6498
6499 dv.major_version = DRV_VERSION_MAJOR;
6500 dv.minor_version = DRV_VERSION_MINOR;
6501 dv.build_version = DRV_VERSION_BUILD;
6502 dv.subbuild_version = 0;
6503 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6504 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6505 }
6506
6507 /**
6508 * i40e_reset_and_rebuild - reset and rebuild using a saved config
6509 * @pf: board private structure
6510  * @reinit: if the Main VSI needs to be re-initialized.
6511 **/
6512 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6513 {
6514 struct i40e_hw *hw = &pf->hw;
6515 u8 set_fc_aq_fail = 0;
6516 i40e_status ret;
6517 u32 v;
6518
6519 /* Now we wait for GRST to settle out.
6520 * We don't have to delete the VEBs or VSIs from the hw switch
6521 * because the reset will make them disappear.
6522 */
6523 ret = i40e_pf_reset(hw);
6524 if (ret) {
6525 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6526 set_bit(__I40E_RESET_FAILED, &pf->state);
6527 goto clear_recovery;
6528 }
6529 pf->pfr_count++;
6530
6531 if (test_bit(__I40E_DOWN, &pf->state))
6532 goto clear_recovery;
6533 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6534
6535 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6536 ret = i40e_init_adminq(&pf->hw);
6537 if (ret) {
6538 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6539 i40e_stat_str(&pf->hw, ret),
6540 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6541 goto clear_recovery;
6542 }
6543
6544 /* re-verify the eeprom if we just had an EMP reset */
6545 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6546 i40e_verify_eeprom(pf);
6547
6548 i40e_clear_pxe_mode(hw);
6549 ret = i40e_get_capabilities(pf);
6550 if (ret)
6551 goto end_core_reset;
6552
6553 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6554 hw->func_caps.num_rx_qp,
6555 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6556 if (ret) {
6557 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6558 goto end_core_reset;
6559 }
6560 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6561 if (ret) {
6562 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6563 goto end_core_reset;
6564 }
6565
6566 #ifdef CONFIG_I40E_DCB
6567 ret = i40e_init_pf_dcb(pf);
6568 if (ret) {
6569 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6570 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6571 /* Continue without DCB enabled */
6572 }
6573 #endif /* CONFIG_I40E_DCB */
6574 #ifdef I40E_FCOE
6575 ret = i40e_init_pf_fcoe(pf);
6576 if (ret)
6577 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6578
6579 #endif
6580 /* do basic switch setup */
6581 ret = i40e_setup_pf_switch(pf, reinit);
6582 if (ret)
6583 goto end_core_reset;
6584
6585 /* driver is only interested in link up/down and module qualification
6586 * reports from firmware
6587 */
6588 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6589 I40E_AQ_EVENT_LINK_UPDOWN |
6590 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6591 if (ret)
6592 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6593 i40e_stat_str(&pf->hw, ret),
6594 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6595
6596 /* make sure our flow control settings are restored */
6597 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6598 if (ret)
6599 dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
6600 i40e_stat_str(&pf->hw, ret),
6601 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6602
6603 /* Rebuild the VSIs and VEBs that existed before reset.
6604 * They are still in our local switch element arrays, so only
6605 * need to rebuild the switch model in the HW.
6606 *
6607 	 * If there were VEBs but the reconstitution failed, we'll try
6608 	 * to recover minimal use by getting the basic PF VSI working.
6609 */
6610 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6611 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6612 /* find the one VEB connected to the MAC, and find orphans */
6613 for (v = 0; v < I40E_MAX_VEB; v++) {
6614 if (!pf->veb[v])
6615 continue;
6616
6617 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6618 pf->veb[v]->uplink_seid == 0) {
6619 ret = i40e_reconstitute_veb(pf->veb[v]);
6620
6621 if (!ret)
6622 continue;
6623
6624 /* If Main VEB failed, we're in deep doodoo,
6625 * so give up rebuilding the switch and set up
6626 * for minimal rebuild of PF VSI.
6627 * If orphan failed, we'll report the error
6628 * but try to keep going.
6629 */
6630 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6631 dev_info(&pf->pdev->dev,
6632 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6633 ret);
6634 pf->vsi[pf->lan_vsi]->uplink_seid
6635 = pf->mac_seid;
6636 break;
6637 } else if (pf->veb[v]->uplink_seid == 0) {
6638 dev_info(&pf->pdev->dev,
6639 "rebuild of orphan VEB failed: %d\n",
6640 ret);
6641 }
6642 }
6643 }
6644 }
6645
6646 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6647 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6648 /* no VEB, so rebuild only the Main VSI */
6649 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6650 if (ret) {
6651 dev_info(&pf->pdev->dev,
6652 "rebuild of Main VSI failed: %d\n", ret);
6653 goto end_core_reset;
6654 }
6655 }
6656
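	/* Firmware older than 4.33 appears to need an explicit link restart
	 * after a PF reset (an assumption based on the workaround below);
	 * give the firmware a short settling delay first.
	 */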
6657 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6658 (pf->hw.aq.fw_maj_ver < 4)) {
6659 msleep(75);
6660 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6661 if (ret)
6662 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6663 i40e_stat_str(&pf->hw, ret),
6664 i40e_aq_str(&pf->hw,
6665 pf->hw.aq.asq_last_status));
6666 }
6667 /* reinit the misc interrupt */
6668 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6669 ret = i40e_setup_misc_vector(pf);
6670
6671 /* restart the VSIs that were rebuilt and running before the reset */
6672 i40e_pf_unquiesce_all_vsi(pf);
6673
6674 if (pf->num_alloc_vfs) {
6675 for (v = 0; v < pf->num_alloc_vfs; v++)
6676 i40e_reset_vf(&pf->vf[v], true);
6677 }
6678
6679 /* tell the firmware that we're starting */
6680 i40e_send_version(pf);
6681
6682 end_core_reset:
6683 clear_bit(__I40E_RESET_FAILED, &pf->state);
6684 clear_recovery:
6685 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6686 }
6687
6688 /**
6689 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6690 * @pf: board private structure
6691 *
6692 * Close up the VFs and other things in prep for a Core Reset,
6693 * then get ready to rebuild the world.
6694 **/
6695 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6696 {
6697 i40e_prep_for_reset(pf);
6698 i40e_reset_and_rebuild(pf, false);
6699 }
6700
6701 /**
6702 * i40e_handle_mdd_event
6703 * @pf: pointer to the PF structure
6704 *
6705  * Called from the MDD irq handler to identify possibly malicious VFs
6706 **/
6707 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6708 {
6709 struct i40e_hw *hw = &pf->hw;
6710 bool mdd_detected = false;
6711 bool pf_mdd_detected = false;
6712 struct i40e_vf *vf;
6713 u32 reg;
6714 int i;
6715
6716 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6717 return;
6718
6719 /* find what triggered the MDD event */
6720 reg = rd32(hw, I40E_GL_MDET_TX);
6721 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6722 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6723 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6724 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6725 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6726 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6727 I40E_GL_MDET_TX_EVENT_SHIFT;
6728 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6729 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6730 pf->hw.func_caps.base_queue;
6731 if (netif_msg_tx_err(pf))
6732 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6733 event, queue, pf_num, vf_num);
6734 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6735 mdd_detected = true;
6736 }
6737 reg = rd32(hw, I40E_GL_MDET_RX);
6738 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6739 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6740 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6741 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6742 I40E_GL_MDET_RX_EVENT_SHIFT;
6743 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6744 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6745 pf->hw.func_caps.base_queue;
6746 if (netif_msg_rx_err(pf))
6747 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6748 event, queue, func);
6749 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6750 mdd_detected = true;
6751 }
6752
6753 if (mdd_detected) {
6754 reg = rd32(hw, I40E_PF_MDET_TX);
6755 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6756 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6757 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6758 pf_mdd_detected = true;
6759 }
6760 reg = rd32(hw, I40E_PF_MDET_RX);
6761 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6762 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6763 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6764 pf_mdd_detected = true;
6765 }
6766 /* Queue belongs to the PF, initiate a reset */
6767 if (pf_mdd_detected) {
6768 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6769 i40e_service_event_schedule(pf);
6770 }
6771 }
6772
6773 /* see if one of the VFs needs its hand slapped */
6774 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6775 vf = &(pf->vf[i]);
6776 reg = rd32(hw, I40E_VP_MDET_TX(i));
6777 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6778 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6779 vf->num_mdd_events++;
6780 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6781 i);
6782 }
6783
6784 reg = rd32(hw, I40E_VP_MDET_RX(i));
6785 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6786 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6787 vf->num_mdd_events++;
6788 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6789 i);
6790 }
6791
6792 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6793 dev_info(&pf->pdev->dev,
6794 "Too many MDD events on VF %d, disabled\n", i);
6795 dev_info(&pf->pdev->dev,
6796 "Use PF Control I/F to re-enable the VF\n");
6797 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6798 }
6799 }
6800
6801 /* re-enable mdd interrupt cause */
6802 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6803 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6804 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6805 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6806 i40e_flush(hw);
6807 }
6808
6809 #ifdef CONFIG_I40E_VXLAN
6810 /**
6811 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6812 * @pf: board private structure
6813 **/
6814 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6815 {
6816 struct i40e_hw *hw = &pf->hw;
6817 i40e_status ret;
6818 __be16 port;
6819 int i;
6820
6821 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6822 return;
6823
6824 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6825
6826 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6827 if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
6828 pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
6829 port = pf->vxlan_ports[i];
6830 if (port)
6831 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6832 I40E_AQC_TUNNEL_TYPE_VXLAN,
6833 NULL, NULL);
6834 else
6835 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6836
6837 if (ret) {
6838 dev_info(&pf->pdev->dev,
6839 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
6840 port ? "add" : "delete",
6841 ntohs(port), i,
6842 i40e_stat_str(&pf->hw, ret),
6843 i40e_aq_str(&pf->hw,
6844 pf->hw.aq.asq_last_status));
6845 pf->vxlan_ports[i] = 0;
6846 }
6847 }
6848 }
6849 }
6850
6851 #endif
6852 /**
6853 * i40e_service_task - Run the driver's async subtasks
6854 * @work: pointer to work_struct containing our data
6855 **/
6856 static void i40e_service_task(struct work_struct *work)
6857 {
6858 struct i40e_pf *pf = container_of(work,
6859 struct i40e_pf,
6860 service_task);
6861 unsigned long start_time = jiffies;
6862
6863 /* don't bother with service tasks if a reset is in progress */
6864 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6865 i40e_service_event_complete(pf);
6866 return;
6867 }
6868
6869 i40e_detect_recover_hung(pf);
6870 i40e_reset_subtask(pf);
6871 i40e_handle_mdd_event(pf);
6872 i40e_vc_process_vflr_event(pf);
6873 i40e_watchdog_subtask(pf);
6874 i40e_fdir_reinit_subtask(pf);
6875 i40e_sync_filters_subtask(pf);
6876 #ifdef CONFIG_I40E_VXLAN
6877 i40e_sync_vxlan_filters_subtask(pf);
6878 #endif
6879 i40e_clean_adminq_subtask(pf);
6880
6881 i40e_service_event_complete(pf);
6882
6883 /* If the tasks have taken longer than one timer cycle or there
6884 * is more work to be done, reschedule the service task now
6885 * rather than wait for the timer to tick again.
6886 */
6887 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6888 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6889 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6890 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6891 i40e_service_event_schedule(pf);
6892 }
6893
6894 /**
6895 * i40e_service_timer - timer callback
6896 * @data: pointer to PF struct
6897 **/
6898 static void i40e_service_timer(unsigned long data)
6899 {
6900 struct i40e_pf *pf = (struct i40e_pf *)data;
6901
6902 mod_timer(&pf->service_timer,
6903 round_jiffies(jiffies + pf->service_timer_period));
6904 i40e_service_event_schedule(pf);
6905 }
6906
6907 /**
6908 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6909 * @vsi: the VSI being configured
6910 **/
6911 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6912 {
6913 struct i40e_pf *pf = vsi->back;
6914
6915 switch (vsi->type) {
6916 case I40E_VSI_MAIN:
6917 vsi->alloc_queue_pairs = pf->num_lan_qps;
6918 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6919 I40E_REQ_DESCRIPTOR_MULTIPLE);
6920 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6921 vsi->num_q_vectors = pf->num_lan_msix;
6922 else
6923 vsi->num_q_vectors = 1;
6924
6925 break;
6926
6927 case I40E_VSI_FDIR:
6928 vsi->alloc_queue_pairs = 1;
6929 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6930 I40E_REQ_DESCRIPTOR_MULTIPLE);
6931 vsi->num_q_vectors = 1;
6932 break;
6933
6934 case I40E_VSI_VMDQ2:
6935 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6936 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6937 I40E_REQ_DESCRIPTOR_MULTIPLE);
6938 vsi->num_q_vectors = pf->num_vmdq_msix;
6939 break;
6940
6941 case I40E_VSI_SRIOV:
6942 vsi->alloc_queue_pairs = pf->num_vf_qps;
6943 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6944 I40E_REQ_DESCRIPTOR_MULTIPLE);
6945 break;
6946
6947 #ifdef I40E_FCOE
6948 case I40E_VSI_FCOE:
6949 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6950 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6951 I40E_REQ_DESCRIPTOR_MULTIPLE);
6952 vsi->num_q_vectors = pf->num_fcoe_msix;
6953 break;
6954
6955 #endif /* I40E_FCOE */
6956 default:
6957 WARN_ON(1);
6958 return -ENODATA;
6959 }
6960
6961 return 0;
6962 }
6963
6964 /**
6965 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6966  * @vsi: VSI pointer
6967 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6968 *
6969 * On error: returns error code (negative)
6970 * On success: returns 0
6971 **/
6972 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6973 {
6974 int size;
6975 int ret = 0;
6976
6977 /* allocate memory for both Tx and Rx ring pointers */
6978 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6979 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6980 if (!vsi->tx_rings)
6981 return -ENOMEM;
6982 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
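	/* rx_rings is not a separate allocation: it points at the second half
	 * of the tx_rings pointer block allocated just above (alloc_queue_pairs
	 * pointers each for Tx and Rx).
	 */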
6983
6984 if (alloc_qvectors) {
6985 /* allocate memory for q_vector pointers */
6986 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6987 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6988 if (!vsi->q_vectors) {
6989 ret = -ENOMEM;
6990 goto err_vectors;
6991 }
6992 }
6993 return ret;
6994
6995 err_vectors:
6996 kfree(vsi->tx_rings);
6997 return ret;
6998 }
6999
7000 /**
7001 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
7002 * @pf: board private structure
7003 * @type: type of VSI
7004 *
7005 * On error: returns error code (negative)
7006 * On success: returns vsi index in PF (positive)
7007 **/
7008 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
7009 {
7010 int ret = -ENODEV;
7011 struct i40e_vsi *vsi;
7012 int vsi_idx;
7013 int i;
7014
7015 /* Need to protect the allocation of the VSIs at the PF level */
7016 mutex_lock(&pf->switch_mutex);
7017
7018 /* VSI list may be fragmented if VSI creation/destruction has
7019 * been happening. We can afford to do a quick scan to look
7020 * for any free VSIs in the list.
7021 *
7022 * find next empty vsi slot, looping back around if necessary
7023 */
7024 i = pf->next_vsi;
7025 while (i < pf->num_alloc_vsi && pf->vsi[i])
7026 i++;
7027 if (i >= pf->num_alloc_vsi) {
7028 i = 0;
7029 while (i < pf->next_vsi && pf->vsi[i])
7030 i++;
7031 }
7032
7033 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
7034 vsi_idx = i; /* Found one! */
7035 } else {
7036 ret = -ENODEV;
7037 goto unlock_pf; /* out of VSI slots! */
7038 }
7039 pf->next_vsi = ++i;
7040
7041 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
7042 if (!vsi) {
7043 ret = -ENOMEM;
7044 goto unlock_pf;
7045 }
7046 vsi->type = type;
7047 vsi->back = pf;
7048 set_bit(__I40E_DOWN, &vsi->state);
7049 vsi->flags = 0;
7050 vsi->idx = vsi_idx;
7051 vsi->rx_itr_setting = pf->rx_itr_default;
7052 vsi->tx_itr_setting = pf->tx_itr_default;
7053 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
7054 pf->rss_table_size : 64;
7055 vsi->netdev_registered = false;
7056 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
7057 INIT_LIST_HEAD(&vsi->mac_filter_list);
7058 vsi->irqs_ready = false;
7059
7060 ret = i40e_set_num_rings_in_vsi(vsi);
7061 if (ret)
7062 goto err_rings;
7063
7064 ret = i40e_vsi_alloc_arrays(vsi, true);
7065 if (ret)
7066 goto err_rings;
7067
7068 /* Setup default MSIX irq handler for VSI */
7069 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
7070
7071 pf->vsi[vsi_idx] = vsi;
7072 ret = vsi_idx;
7073 goto unlock_pf;
7074
7075 err_rings:
7076 pf->next_vsi = i - 1;
7077 kfree(vsi);
7078 unlock_pf:
7079 mutex_unlock(&pf->switch_mutex);
7080 return ret;
7081 }
7082
7083 /**
7084 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
7085  * @vsi: VSI pointer
7086 * @free_qvectors: a bool to specify if q_vectors need to be freed.
7087 *
7088 * On error: returns error code (negative)
7089 * On success: returns 0
7090 **/
7091 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
7092 {
7093 /* free the ring and vector containers */
7094 if (free_qvectors) {
7095 kfree(vsi->q_vectors);
7096 vsi->q_vectors = NULL;
7097 }
7098 kfree(vsi->tx_rings);
7099 vsi->tx_rings = NULL;
7100 vsi->rx_rings = NULL;
7101 }
7102
7103 /**
7104 * i40e_vsi_clear - Deallocate the VSI provided
7105 * @vsi: the VSI being un-configured
7106 **/
7107 static int i40e_vsi_clear(struct i40e_vsi *vsi)
7108 {
7109 struct i40e_pf *pf;
7110
7111 if (!vsi)
7112 return 0;
7113
7114 if (!vsi->back)
7115 goto free_vsi;
7116 pf = vsi->back;
7117
7118 mutex_lock(&pf->switch_mutex);
7119 if (!pf->vsi[vsi->idx]) {
7120 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
7121 vsi->idx, vsi->idx, vsi, vsi->type);
7122 goto unlock_vsi;
7123 }
7124
7125 if (pf->vsi[vsi->idx] != vsi) {
7126 dev_err(&pf->pdev->dev,
7127 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
7128 pf->vsi[vsi->idx]->idx,
7129 pf->vsi[vsi->idx],
7130 pf->vsi[vsi->idx]->type,
7131 vsi->idx, vsi, vsi->type);
7132 goto unlock_vsi;
7133 }
7134
7135 /* updates the PF for this cleared vsi */
7136 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7137 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
7138
7139 i40e_vsi_free_arrays(vsi, true);
7140
7141 pf->vsi[vsi->idx] = NULL;
7142 if (vsi->idx < pf->next_vsi)
7143 pf->next_vsi = vsi->idx;
7144
7145 unlock_vsi:
7146 mutex_unlock(&pf->switch_mutex);
7147 free_vsi:
7148 kfree(vsi);
7149
7150 return 0;
7151 }
7152
7153 /**
7154 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7155 * @vsi: the VSI being cleaned
7156 **/
7157 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7158 {
7159 int i;
7160
7161 if (vsi->tx_rings && vsi->tx_rings[0]) {
7162 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7163 kfree_rcu(vsi->tx_rings[i], rcu);
7164 vsi->tx_rings[i] = NULL;
7165 vsi->rx_rings[i] = NULL;
7166 }
7167 }
7168 }
7169
7170 /**
7171 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7172 * @vsi: the VSI being configured
7173 **/
7174 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7175 {
7176 struct i40e_ring *tx_ring, *rx_ring;
7177 struct i40e_pf *pf = vsi->back;
7178 int i;
7179
7180 /* Set basic values in the rings to be used later during open() */
7181 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7182 /* allocate space for both Tx and Rx in one shot */
7183 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7184 if (!tx_ring)
7185 goto err_out;
7186
7187 tx_ring->queue_index = i;
7188 tx_ring->reg_idx = vsi->base_queue + i;
7189 tx_ring->ring_active = false;
7190 tx_ring->vsi = vsi;
7191 tx_ring->netdev = vsi->netdev;
7192 tx_ring->dev = &pf->pdev->dev;
7193 tx_ring->count = vsi->num_desc;
7194 tx_ring->size = 0;
7195 tx_ring->dcb_tc = 0;
7196 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7197 tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7198 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7199 tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7200 vsi->tx_rings[i] = tx_ring;
7201
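		/* The Rx ring is the second element of the two-ring allocation
		 * made above, so freeing tx_rings[i] later releases both.
		 */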
7202 rx_ring = &tx_ring[1];
7203 rx_ring->queue_index = i;
7204 rx_ring->reg_idx = vsi->base_queue + i;
7205 rx_ring->ring_active = false;
7206 rx_ring->vsi = vsi;
7207 rx_ring->netdev = vsi->netdev;
7208 rx_ring->dev = &pf->pdev->dev;
7209 rx_ring->count = vsi->num_desc;
7210 rx_ring->size = 0;
7211 rx_ring->dcb_tc = 0;
7212 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7213 set_ring_16byte_desc_enabled(rx_ring);
7214 else
7215 clear_ring_16byte_desc_enabled(rx_ring);
7216 vsi->rx_rings[i] = rx_ring;
7217 }
7218
7219 return 0;
7220
7221 err_out:
7222 i40e_vsi_clear_rings(vsi);
7223 return -ENOMEM;
7224 }
7225
7226 /**
7227 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7228 * @pf: board private structure
7229 * @vectors: the number of MSI-X vectors to request
7230 *
7231 * Returns the number of vectors reserved, or error
7232 **/
7233 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7234 {
7235 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7236 I40E_MIN_MSIX, vectors);
7237 if (vectors < 0) {
7238 dev_info(&pf->pdev->dev,
7239 "MSI-X vector reservation failed: %d\n", vectors);
7240 vectors = 0;
7241 }
7242
7243 return vectors;
7244 }
7245
7246 /**
7247 * i40e_init_msix - Setup the MSIX capability
7248 * @pf: board private structure
7249 *
7250 * Work with the OS to set up the MSIX vectors needed.
7251 *
7252 * Returns the number of vectors reserved or negative on failure
7253 **/
7254 static int i40e_init_msix(struct i40e_pf *pf)
7255 {
7256 struct i40e_hw *hw = &pf->hw;
7257 int vectors_left;
7258 int v_budget, i;
7259 int v_actual;
7260
7261 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7262 return -ENODEV;
7263
7264 /* The number of vectors we'll request will be comprised of:
7265 * - Add 1 for "other" cause for Admin Queue events, etc.
7266 * - The number of LAN queue pairs
7267 * - Queues being used for RSS.
7268 	 *   We don't need as many as max_rss_size vectors;
7269 	 *   use rss_size instead in the calculation since that
7270 	 *   is governed by the number of CPUs in the system.
7271 * - assumes symmetric Tx/Rx pairing
7272 * - The number of VMDq pairs
7273 #ifdef I40E_FCOE
7274 * - The number of FCOE qps.
7275 #endif
7276 * Once we count this up, try the request.
7277 *
7278 * If we can't get what we want, we'll simplify to nearly nothing
7279 * and try again. If that still fails, we punt.
7280 */
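	/* Hypothetical example (all numbers assumed): with 64 vectors reported
	 * by the HW capability and 8 online CPUs, the budget built below is
	 * 1 (misc) + 8 (LAN) + 1 (flow director sideband, if enabled) plus up
	 * to num_vmdq_vsis * num_vmdq_qps for VMDq, capped by whatever of the
	 * 64 vectors remains.
	 */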
7281 vectors_left = hw->func_caps.num_msix_vectors;
7282 v_budget = 0;
7283
7284 /* reserve one vector for miscellaneous handler */
7285 if (vectors_left) {
7286 v_budget++;
7287 vectors_left--;
7288 }
7289
7290 /* reserve vectors for the main PF traffic queues */
7291 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7292 vectors_left -= pf->num_lan_msix;
7293 v_budget += pf->num_lan_msix;
7294
7295 /* reserve one vector for sideband flow director */
7296 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7297 if (vectors_left) {
7298 v_budget++;
7299 vectors_left--;
7300 } else {
7301 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7302 }
7303 }
7304
7305 #ifdef I40E_FCOE
7306 /* can we reserve enough for FCoE? */
7307 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7308 if (!vectors_left)
7309 pf->num_fcoe_msix = 0;
7310 else if (vectors_left >= pf->num_fcoe_qps)
7311 pf->num_fcoe_msix = pf->num_fcoe_qps;
7312 else
7313 pf->num_fcoe_msix = 1;
7314 v_budget += pf->num_fcoe_msix;
7315 vectors_left -= pf->num_fcoe_msix;
7316 }
7317
7318 #endif
7319 /* any vectors left over go for VMDq support */
7320 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7321 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7322 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7323
7324 /* if we're short on vectors for what's desired, we limit
7325 * the queues per vmdq. If this is still more than are
7326 * available, the user will need to change the number of
7327 * queues/vectors used by the PF later with the ethtool
7328 * channels command
7329 */
7330 if (vmdq_vecs < vmdq_vecs_wanted)
7331 pf->num_vmdq_qps = 1;
7332 pf->num_vmdq_msix = pf->num_vmdq_qps;
7333
7334 v_budget += vmdq_vecs;
7335 vectors_left -= vmdq_vecs;
7336 }
7337
7338 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7339 GFP_KERNEL);
7340 if (!pf->msix_entries)
7341 return -ENOMEM;
7342
7343 for (i = 0; i < v_budget; i++)
7344 pf->msix_entries[i].entry = i;
7345 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7346
7347 if (v_actual != v_budget) {
7348 /* If we have limited resources, we will start with no vectors
7349 * for the special features and then allocate vectors to some
7350 * of these features based on the policy and at the end disable
7351 * the features that did not get any vectors.
7352 */
7353 #ifdef I40E_FCOE
7354 pf->num_fcoe_qps = 0;
7355 pf->num_fcoe_msix = 0;
7356 #endif
7357 pf->num_vmdq_msix = 0;
7358 }
7359
7360 if (v_actual < I40E_MIN_MSIX) {
7361 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7362 kfree(pf->msix_entries);
7363 pf->msix_entries = NULL;
7364 return -ENODEV;
7365
7366 } else if (v_actual == I40E_MIN_MSIX) {
7367 /* Adjust for minimal MSIX use */
7368 pf->num_vmdq_vsis = 0;
7369 pf->num_vmdq_qps = 0;
7370 pf->num_lan_qps = 1;
7371 pf->num_lan_msix = 1;
7372
7373 } else if (v_actual != v_budget) {
7374 int vec;
7375
7376 /* reserve the misc vector */
7377 vec = v_actual - 1;
7378
7379 /* Scale vector usage down */
7380 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
7381 pf->num_vmdq_vsis = 1;
7382 pf->num_vmdq_qps = 1;
7383 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7384
7385 /* partition out the remaining vectors */
7386 switch (vec) {
7387 case 2:
7388 pf->num_lan_msix = 1;
7389 break;
7390 case 3:
7391 #ifdef I40E_FCOE
7392 /* give one vector to FCoE */
7393 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7394 pf->num_lan_msix = 1;
7395 pf->num_fcoe_msix = 1;
7396 }
7397 #else
7398 pf->num_lan_msix = 2;
7399 #endif
7400 break;
7401 default:
7402 #ifdef I40E_FCOE
7403 /* give one vector to FCoE */
7404 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7405 pf->num_fcoe_msix = 1;
7406 vec--;
7407 }
7408 #endif
7409 /* give the rest to the PF */
7410 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7411 break;
7412 }
7413 }
7414
7415 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7416 (pf->num_vmdq_msix == 0)) {
7417 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7418 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7419 }
7420 #ifdef I40E_FCOE
7421
7422 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7423 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7424 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7425 }
7426 #endif
7427 return v_actual;
7428 }
7429
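/* Worked example of the budgeting above (illustrative numbers only):
 * with hw->func_caps.num_msix_vectors = 32, 8 online CPUs, FD sideband
 * enabled, and 8 VMDq VSIs of 2 queue pairs each:
 *
 *	misc vector:              1	(vectors_left = 31)
 *	LAN queues:  min(8, 31) = 8	(vectors_left = 23)
 *	FD sideband:              1	(vectors_left = 22)
 *	VMDq:    min(22, 8*2) = 16	(vectors_left =  6)
 *
 * so v_budget = 26. If i40e_reserve_msix_vectors() grants fewer than
 * that, the fallback paths above shrink VMDq and FD sideband first and
 * give whatever remains to the LAN queues.
 */
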
7430 /**
7431 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7432 * @vsi: the VSI being configured
7433 * @v_idx: index of the vector in the vsi struct
7434 *
7435 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7436 **/
7437 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7438 {
7439 struct i40e_q_vector *q_vector;
7440
7441 /* allocate q_vector */
7442 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7443 if (!q_vector)
7444 return -ENOMEM;
7445
7446 q_vector->vsi = vsi;
7447 q_vector->v_idx = v_idx;
7448 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7449 if (vsi->netdev)
7450 netif_napi_add(vsi->netdev, &q_vector->napi,
7451 i40e_napi_poll, NAPI_POLL_WEIGHT);
7452
7453 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7454 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7455
7456 /* tie q_vector and vsi together */
7457 vsi->q_vectors[v_idx] = q_vector;
7458
7459 return 0;
7460 }
7461
7462 /**
7463 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7464 * @vsi: the VSI being configured
7465 *
7466 * We allocate one q_vector per queue interrupt. If allocation fails we
7467 * return -ENOMEM.
7468 **/
7469 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7470 {
7471 struct i40e_pf *pf = vsi->back;
7472 int v_idx, num_q_vectors;
7473 int err;
7474
7475 /* if not MSIX, give the one vector only to the LAN VSI */
7476 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7477 num_q_vectors = vsi->num_q_vectors;
7478 else if (vsi == pf->vsi[pf->lan_vsi])
7479 num_q_vectors = 1;
7480 else
7481 return -EINVAL;
7482
7483 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7484 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7485 if (err)
7486 goto err_out;
7487 }
7488
7489 return 0;
7490
7491 err_out:
7492 while (v_idx--)
7493 i40e_free_q_vector(vsi, v_idx);
7494
7495 return err;
7496 }
7497
7498 /**
7499 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7500 * @pf: board private structure to initialize
7501 **/
7502 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7503 {
7504 int vectors = 0;
7505 ssize_t size;
7506
7507 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7508 vectors = i40e_init_msix(pf);
7509 if (vectors < 0) {
7510 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7511 #ifdef I40E_FCOE
7512 I40E_FLAG_FCOE_ENABLED |
7513 #endif
7514 I40E_FLAG_RSS_ENABLED |
7515 I40E_FLAG_DCB_CAPABLE |
7516 I40E_FLAG_SRIOV_ENABLED |
7517 I40E_FLAG_FD_SB_ENABLED |
7518 I40E_FLAG_FD_ATR_ENABLED |
7519 I40E_FLAG_VMDQ_ENABLED);
7520
7521 /* rework the queue expectations without MSIX */
7522 i40e_determine_queue_usage(pf);
7523 }
7524 }
7525
7526 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7527 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7528 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7529 vectors = pci_enable_msi(pf->pdev);
7530 if (vectors < 0) {
7531 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7532 vectors);
7533 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7534 }
7535 vectors = 1; /* one MSI or Legacy vector */
7536 }
7537
7538 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7539 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7540
7541 /* set up vector assignment tracking */
7542 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7543 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7544 if (!pf->irq_pile) {
7545 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7546 return -ENOMEM;
7547 }
7548 pf->irq_pile->num_entries = vectors;
7549 pf->irq_pile->search_hint = 0;
7550
7551 /* track first vector for misc interrupts, ignore return */
7552 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7553
7554 return 0;
7555 }
7556
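/* Note on the irq_pile sizing above: struct i40e_lump_tracking is, in
 * effect, followed in memory by one u16 per tracked vector, so for
 * e.g. 26 vectors the allocation is
 * sizeof(struct i40e_lump_tracking) + 26 * sizeof(u16), and
 * i40e_get_lump() then hands out contiguous runs of entries from that
 * pool (the misc vector claims the first entry just above).
 */
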
7557 /**
7558 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7559 * @pf: board private structure
7560 *
7561 * This sets up the handler for MSIX 0, which is used to manage the
7562 * non-queue interrupts, e.g. AdminQ and errors. This is not used
7563 * when in MSI or Legacy interrupt mode.
7564 **/
7565 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7566 {
7567 struct i40e_hw *hw = &pf->hw;
7568 int err = 0;
7569
7570 /* Only request the irq if this is the first time through, and
7571 * not when we're rebuilding after a Reset
7572 */
7573 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7574 err = request_irq(pf->msix_entries[0].vector,
7575 i40e_intr, 0, pf->int_name, pf);
7576 if (err) {
7577 dev_info(&pf->pdev->dev,
7578 "request_irq for %s failed: %d\n",
7579 pf->int_name, err);
7580 return -EFAULT;
7581 }
7582 }
7583
7584 i40e_enable_misc_int_causes(pf);
7585
7586 /* associate no queues to the misc vector */
7587 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7588 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7589
7590 i40e_flush(hw);
7591
7592 i40e_irq_dynamic_enable_icr0(pf);
7593
7594 return err;
7595 }
7596
7597 /**
7598 * i40e_config_rss_aq - Prepare for RSS using AQ commands
7599 * @vsi: vsi structure
7600 * @seed: RSS hash seed
7601 **/
7602 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7603 {
7604 struct i40e_aqc_get_set_rss_key_data rss_key;
7605 struct i40e_pf *pf = vsi->back;
7606 struct i40e_hw *hw = &pf->hw;
7607 bool pf_lut = false;
7608 u8 *rss_lut;
7609 int ret, i;
7610
7611 memset(&rss_key, 0, sizeof(rss_key));
7612 memcpy(&rss_key, seed, sizeof(rss_key));
7613
7614 rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7615 if (!rss_lut)
7616 return -ENOMEM;
7617
7618 /* Populate the LUT with max no. of queues in round robin fashion */
7619 for (i = 0; i < vsi->rss_table_size; i++)
7620 rss_lut[i] = i % vsi->rss_size;
7621
7622 ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7623 if (ret) {
7624 dev_info(&pf->pdev->dev,
7625 "Cannot set RSS key, err %s aq_err %s\n",
7626 i40e_stat_str(&pf->hw, ret),
7627 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7628 goto config_rss_aq_out;
7629 }
7630
7631 if (vsi->type == I40E_VSI_MAIN)
7632 pf_lut = true;
7633
7634 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7635 vsi->rss_table_size);
7636 if (ret)
7637 dev_info(&pf->pdev->dev,
7638 "Cannot set RSS lut, err %s aq_err %s\n",
7639 i40e_stat_str(&pf->hw, ret),
7640 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7641
7642 config_rss_aq_out:
7643 kfree(rss_lut);
7644 return ret;
7645 }
7646
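/* Illustration of the round-robin fill above (example values): with
 * vsi->rss_size = 4 the LUT bytes become 0, 1, 2, 3, 0, 1, 2, 3, ...
 * so hashed receive flows spread evenly across the four queues.
 */
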
7647 /**
7648 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7649 * @vsi: VSI structure
7650 **/
7651 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7652 {
7653 u8 seed[I40E_HKEY_ARRAY_SIZE];
7654 struct i40e_pf *pf = vsi->back;
7655
7656 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7657 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7658
7659 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7660 return i40e_config_rss_aq(vsi, seed);
7661
7662 return 0;
7663 }
7664
7665 /**
7666 * i40e_config_rss_reg - Prepare for RSS if used
7667 * @pf: board private structure
7668 * @seed: RSS hash seed
7669 **/
7670 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7671 {
7672 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7673 struct i40e_hw *hw = &pf->hw;
7674 u32 *seed_dw = (u32 *)seed;
7675 u32 current_queue = 0;
7676 u32 lut = 0;
7677 int i, j;
7678
7679 /* Fill out hash function seed */
7680 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7681 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7682
7683 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7684 lut = 0;
7685 for (j = 0; j < 4; j++) {
7686 if (current_queue == vsi->rss_size)
7687 current_queue = 0;
7688 lut |= ((current_queue) << (8 * j));
7689 current_queue++;
7690 }
7691 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7692 }
7693 i40e_flush(hw);
7694
7695 return 0;
7696 }
7697
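/* Sketch of the HLUT packing above (example values): each 32-bit HLUT
 * register carries four one-byte queue indices, so with
 * vsi->rss_size = 4 the first register is built as
 *
 *	lut = (0 << 0) | (1 << 8) | (2 << 16) | (3 << 24) = 0x03020100
 *
 * and the same wrap-around pattern fills the remaining registers.
 */
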
7698 /**
7699 * i40e_config_rss - Prepare for RSS if used
7700 * @pf: board private structure
7701 **/
7702 static int i40e_config_rss(struct i40e_pf *pf)
7703 {
7704 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7705 u8 seed[I40E_HKEY_ARRAY_SIZE];
7706 struct i40e_hw *hw = &pf->hw;
7707 u32 reg_val;
7708 u64 hena;
7709
7710 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7711
7712 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7713 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7714 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7715 hena |= i40e_pf_get_default_rss_hena(pf);
7716
7717 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7718 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7719
7720 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7721
7722 /* Determine the RSS table size based on the hardware capabilities */
7723 reg_val = rd32(hw, I40E_PFQF_CTL_0);
7724 reg_val = (pf->rss_table_size == 512) ?
7725 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
7726 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
7727 wr32(hw, I40E_PFQF_CTL_0, reg_val);
7728
7729 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7730 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
7731 else
7732 return i40e_config_rss_reg(pf, seed);
7733 }
7734
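/* Note on the HENA handling above: the enabled-hash set is one 64-bit
 * value split across two 32-bit registers. For example (value assumed),
 * hena = 0x0000000100000002 would write 0x00000002 to I40E_PFQF_HENA(0)
 * and 0x00000001 to I40E_PFQF_HENA(1).
 */
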
7735 /**
7736 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7737 * @pf: board private structure
7738 * @queue_count: the requested queue count for rss.
7739 *
7740  * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
7741  * count, which may differ from the requested queue count.
7742 **/
7743 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7744 {
7745 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7746 int new_rss_size;
7747
7748 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7749 return 0;
7750
7751 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
7752
7753 if (queue_count != vsi->num_queue_pairs) {
7754 vsi->req_queue_pairs = queue_count;
7755 i40e_prep_for_reset(pf);
7756
7757 pf->rss_size = new_rss_size;
7758
7759 i40e_reset_and_rebuild(pf, true);
7760 i40e_config_rss(pf);
7761 }
7762 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
7763 return pf->rss_size;
7764 }
7765
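/* Likely path into the helper above (sketch; the exact plumbing lives
 * in the ethtool code): a request such as
 *
 *	ethtool -L <iface> combined 8
 *
 * lands in i40e_reconfig_rss_queues(pf, 8), which preps for reset,
 * updates pf->rss_size, and rebuilds with the new queue count.
 */
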
7766 /**
7767 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7768 * @pf: board private structure
7769 **/
7770 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7771 {
7772 i40e_status status;
7773 bool min_valid, max_valid;
7774 u32 max_bw, min_bw;
7775
7776 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7777 &min_valid, &max_valid);
7778
7779 if (!status) {
7780 if (min_valid)
7781 pf->npar_min_bw = min_bw;
7782 if (max_valid)
7783 pf->npar_max_bw = max_bw;
7784 }
7785
7786 return status;
7787 }
7788
7789 /**
7790 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7791 * @pf: board private structure
7792 **/
7793 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7794 {
7795 struct i40e_aqc_configure_partition_bw_data bw_data;
7796 i40e_status status;
7797
7798 /* Set the valid bit for this PF */
7799 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
7800 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7801 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7802
7803 /* Set the new bandwidths */
7804 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7805
7806 return status;
7807 }
7808
7809 /**
7810 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7811 * @pf: board private structure
7812 **/
7813 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
7814 {
7815 /* Commit temporary BW setting to permanent NVM image */
7816 enum i40e_admin_queue_err last_aq_status;
7817 i40e_status ret;
7818 u16 nvm_word;
7819
7820 if (pf->hw.partition_id != 1) {
7821 dev_info(&pf->pdev->dev,
7822 "Commit BW only works on partition 1! This is partition %d",
7823 pf->hw.partition_id);
7824 ret = I40E_NOT_SUPPORTED;
7825 goto bw_commit_out;
7826 }
7827
7828 /* Acquire NVM for read access */
7829 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
7830 last_aq_status = pf->hw.aq.asq_last_status;
7831 if (ret) {
7832 dev_info(&pf->pdev->dev,
7833 "Cannot acquire NVM for read access, err %s aq_err %s\n",
7834 i40e_stat_str(&pf->hw, ret),
7835 i40e_aq_str(&pf->hw, last_aq_status));
7836 goto bw_commit_out;
7837 }
7838
7839 /* Read word 0x10 of NVM - SW compatibility word 1 */
7840 ret = i40e_aq_read_nvm(&pf->hw,
7841 I40E_SR_NVM_CONTROL_WORD,
7842 0x10, sizeof(nvm_word), &nvm_word,
7843 false, NULL);
7844 /* Save off last admin queue command status before releasing
7845 * the NVM
7846 */
7847 last_aq_status = pf->hw.aq.asq_last_status;
7848 i40e_release_nvm(&pf->hw);
7849 if (ret) {
7850 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
7851 i40e_stat_str(&pf->hw, ret),
7852 i40e_aq_str(&pf->hw, last_aq_status));
7853 goto bw_commit_out;
7854 }
7855
7856 /* Wait a bit for NVM release to complete */
7857 msleep(50);
7858
7859 /* Acquire NVM for write access */
7860 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
7861 last_aq_status = pf->hw.aq.asq_last_status;
7862 if (ret) {
7863 dev_info(&pf->pdev->dev,
7864 "Cannot acquire NVM for write access, err %s aq_err %s\n",
7865 i40e_stat_str(&pf->hw, ret),
7866 i40e_aq_str(&pf->hw, last_aq_status));
7867 goto bw_commit_out;
7868 }
7869 /* Write it back out unchanged to initiate update NVM,
7870 * which will force a write of the shadow (alt) RAM to
7871 * the NVM - thus storing the bandwidth values permanently.
7872 */
7873 ret = i40e_aq_update_nvm(&pf->hw,
7874 I40E_SR_NVM_CONTROL_WORD,
7875 0x10, sizeof(nvm_word),
7876 &nvm_word, true, NULL);
7877 /* Save off last admin queue command status before releasing
7878 * the NVM
7879 */
7880 last_aq_status = pf->hw.aq.asq_last_status;
7881 i40e_release_nvm(&pf->hw);
7882 if (ret)
7883 dev_info(&pf->pdev->dev,
7884 "BW settings NOT SAVED, err %s aq_err %s\n",
7885 i40e_stat_str(&pf->hw, ret),
7886 i40e_aq_str(&pf->hw, last_aq_status));
7887 bw_commit_out:
7888
7889 return ret;
7890 }
7891
7892 /**
7893 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7894 * @pf: board private structure to initialize
7895 *
7896 * i40e_sw_init initializes the Adapter private data structure.
7897 * Fields are initialized based on PCI device information and
7898 * OS network device settings (MTU size).
7899 **/
7900 static int i40e_sw_init(struct i40e_pf *pf)
7901 {
7902 int err = 0;
7903 int size;
7904
7905 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7906 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7907 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7908 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7909 if (I40E_DEBUG_USER & debug)
7910 pf->hw.debug_mask = debug;
7911 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7912 I40E_DEFAULT_MSG_ENABLE);
7913 }
7914
7915 /* Set default capability flags */
7916 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7917 I40E_FLAG_MSI_ENABLED |
7918 I40E_FLAG_MSIX_ENABLED;
7919
7920 if (iommu_present(&pci_bus_type))
7921 pf->flags |= I40E_FLAG_RX_PS_ENABLED;
7922 else
7923 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
7924
7925 /* Set default ITR */
7926 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7927 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7928
7929 /* Depending on PF configurations, it is possible that the RSS
7930 * maximum might end up larger than the available queues
7931 */
7932 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
7933 pf->rss_size = 1;
7934 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
7935 pf->rss_size_max = min_t(int, pf->rss_size_max,
7936 pf->hw.func_caps.num_tx_qp);
7937 if (pf->hw.func_caps.rss) {
7938 pf->flags |= I40E_FLAG_RSS_ENABLED;
7939 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7940 }
7941
7942 /* MFP mode enabled */
7943 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
7944 pf->flags |= I40E_FLAG_MFP_ENABLED;
7945 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7946 if (i40e_get_npar_bw_setting(pf))
7947 dev_warn(&pf->pdev->dev,
7948 "Could not get NPAR bw settings\n");
7949 else
7950 dev_info(&pf->pdev->dev,
7951 "Min BW = %8.8x, Max BW = %8.8x\n",
7952 pf->npar_min_bw, pf->npar_max_bw);
7953 }
7954
7955 /* FW/NVM is not yet fixed in this regard */
7956 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7957 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7958 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7959 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7960 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7961 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7962 } else {
7963 dev_info(&pf->pdev->dev,
7964 "Flow Director Sideband mode Disabled in MFP mode\n");
7965 }
7966 pf->fdir_pf_filter_count =
7967 pf->hw.func_caps.fd_filters_guaranteed;
7968 pf->hw.fdir_shared_filter_count =
7969 pf->hw.func_caps.fd_filters_best_effort;
7970 }
7971
7972 if (pf->hw.func_caps.vmdq) {
7973 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7974 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7975 }
7976
7977 #ifdef I40E_FCOE
7978 err = i40e_init_pf_fcoe(pf);
7979 if (err)
7980 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7981
7982 #endif /* I40E_FCOE */
7983 #ifdef CONFIG_PCI_IOV
7984 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7985 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7986 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7987 pf->num_req_vfs = min_t(int,
7988 pf->hw.func_caps.num_vfs,
7989 I40E_MAX_VF_COUNT);
7990 }
7991 #endif /* CONFIG_PCI_IOV */
7992 if (pf->hw.mac.type == I40E_MAC_X722) {
7993 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
7994 I40E_FLAG_128_QP_RSS_CAPABLE |
7995 I40E_FLAG_HW_ATR_EVICT_CAPABLE |
7996 I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
7997 I40E_FLAG_WB_ON_ITR_CAPABLE |
7998 I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
7999 }
8000 pf->eeprom_version = 0xDEAD;
8001 pf->lan_veb = I40E_NO_VEB;
8002 pf->lan_vsi = I40E_NO_VSI;
8003
8004 /* By default FW has this off for performance reasons */
8005 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
8006
8007 /* set up queue assignment tracking */
8008 size = sizeof(struct i40e_lump_tracking)
8009 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
8010 pf->qp_pile = kzalloc(size, GFP_KERNEL);
8011 if (!pf->qp_pile) {
8012 err = -ENOMEM;
8013 goto sw_init_done;
8014 }
8015 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
8016 pf->qp_pile->search_hint = 0;
8017
8018 pf->tx_timeout_recovery_level = 1;
8019
8020 mutex_init(&pf->switch_mutex);
8021
8022 /* If NPAR is enabled nudge the Tx scheduler */
8023 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
8024 i40e_set_npar_bw_setting(pf);
8025
8026 sw_init_done:
8027 return err;
8028 }
8029
8030 /**
8031 * i40e_set_ntuple - set the ntuple feature flag and take action
8032 * @pf: board private structure to initialize
8033 * @features: the feature set that the stack is suggesting
8034 *
8035  * Returns a bool indicating whether a reset needs to happen.
8036 **/
8037 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8038 {
8039 bool need_reset = false;
8040
8041 /* Check if Flow Director n-tuple support was enabled or disabled. If
8042 * the state changed, we need to reset.
8043 */
8044 if (features & NETIF_F_NTUPLE) {
8045 /* Enable filters and mark for reset */
8046 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8047 need_reset = true;
8048 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8049 } else {
8050 /* turn off filters, mark for reset and clear SW filter list */
8051 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8052 need_reset = true;
8053 i40e_fdir_filter_exit(pf);
8054 }
8055 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8056 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8057 /* reset fd counters */
8058 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8059 pf->fdir_pf_active_filters = 0;
8060 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8061 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8062 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8063 /* if ATR was auto disabled it can be re-enabled. */
8064 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8065 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8066 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8067 }
8068 return need_reset;
8069 }
8070
8071 /**
8072 * i40e_set_features - set the netdev feature flags
8073 * @netdev: ptr to the netdev being adjusted
8074 * @features: the feature set that the stack is suggesting
8075 **/
8076 static int i40e_set_features(struct net_device *netdev,
8077 netdev_features_t features)
8078 {
8079 struct i40e_netdev_priv *np = netdev_priv(netdev);
8080 struct i40e_vsi *vsi = np->vsi;
8081 struct i40e_pf *pf = vsi->back;
8082 bool need_reset;
8083
8084 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8085 i40e_vlan_stripping_enable(vsi);
8086 else
8087 i40e_vlan_stripping_disable(vsi);
8088
8089 need_reset = i40e_set_ntuple(pf, features);
8090
8091 if (need_reset)
8092 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8093
8094 return 0;
8095 }
8096
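/* Sketch of how the feature path above is typically reached
 * (illustrative): the stack calls ndo_set_features when the user runs
 * something like
 *
 *	ethtool -K <iface> ntuple on
 *
 * and if that flips I40E_FLAG_FD_SB_ENABLED, i40e_set_ntuple() reports
 * need_reset and a PF reset is requested to apply the change.
 */
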
8097 #ifdef CONFIG_I40E_VXLAN
8098 /**
8099  * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
8100 * @pf: board private structure
8101 * @port: The UDP port to look up
8102 *
8103 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8104 **/
8105 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
8106 {
8107 u8 i;
8108
8109 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8110 if (pf->vxlan_ports[i] == port)
8111 return i;
8112 }
8113
8114 return i;
8115 }
8116
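/* Minimal usage sketch for the lookup above (illustrative only, not
 * compiled): a hit returns the slot index, a miss returns the sentinel
 * I40E_MAX_PF_UDP_OFFLOAD_PORTS, and looking up port 0 finds the first
 * free slot, if any.
 */
#if 0
	u8 idx = i40e_get_vxlan_port_idx(pf, htons(4789));

	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		netdev_info(netdev, "port offloaded at slot %u\n", idx);
	else
		netdev_info(netdev, "port not offloaded\n");
#endif
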
8117 /**
8118 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8119 * @netdev: This physical port's netdev
8120 * @sa_family: Socket Family that VXLAN is notifying us about
8121 * @port: New UDP port number that VXLAN started listening to
8122 **/
8123 static void i40e_add_vxlan_port(struct net_device *netdev,
8124 sa_family_t sa_family, __be16 port)
8125 {
8126 struct i40e_netdev_priv *np = netdev_priv(netdev);
8127 struct i40e_vsi *vsi = np->vsi;
8128 struct i40e_pf *pf = vsi->back;
8129 u8 next_idx;
8130 u8 idx;
8131
8132 if (sa_family == AF_INET6)
8133 return;
8134
8135 idx = i40e_get_vxlan_port_idx(pf, port);
8136
8137 /* Check if port already exists */
8138 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8139 netdev_info(netdev, "vxlan port %d already offloaded\n",
8140 ntohs(port));
8141 return;
8142 }
8143
8144 /* Now check if there is space to add the new port */
8145 next_idx = i40e_get_vxlan_port_idx(pf, 0);
8146
8147 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8148 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8149 ntohs(port));
8150 return;
8151 }
8152
8153 /* New port: add it and mark its index in the bitmap */
8154 pf->vxlan_ports[next_idx] = port;
8155 pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8156 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8157 }
8158
8159 /**
8160 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8161 * @netdev: This physical port's netdev
8162 * @sa_family: Socket Family that VXLAN is notifying us about
8163 * @port: UDP port number that VXLAN stopped listening to
8164 **/
8165 static void i40e_del_vxlan_port(struct net_device *netdev,
8166 sa_family_t sa_family, __be16 port)
8167 {
8168 struct i40e_netdev_priv *np = netdev_priv(netdev);
8169 struct i40e_vsi *vsi = np->vsi;
8170 struct i40e_pf *pf = vsi->back;
8171 u8 idx;
8172
8173 if (sa_family == AF_INET6)
8174 return;
8175
8176 idx = i40e_get_vxlan_port_idx(pf, port);
8177
8178 /* Check if port already exists */
8179 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8180 /* if port exists, set it to 0 (mark for deletion)
8181 * and make it pending
8182 */
8183 pf->vxlan_ports[idx] = 0;
8184 pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8185 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8186 } else {
8187 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8188 ntohs(port));
8189 }
8190 }
8191
8192 #endif
8193 static int i40e_get_phys_port_id(struct net_device *netdev,
8194 struct netdev_phys_item_id *ppid)
8195 {
8196 struct i40e_netdev_priv *np = netdev_priv(netdev);
8197 struct i40e_pf *pf = np->vsi->back;
8198 struct i40e_hw *hw = &pf->hw;
8199
8200 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8201 return -EOPNOTSUPP;
8202
8203 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8204 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8205
8206 return 0;
8207 }
8208
8209 /**
8210 * i40e_ndo_fdb_add - add an entry to the hardware database
8211 * @ndm: the input from the stack
8212 * @tb: pointer to array of nladdr (unused)
8213 * @dev: the net device pointer
8214 * @addr: the MAC address entry being added
8215 * @flags: instructions from stack about fdb operation
8216 */
8217 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8218 struct net_device *dev,
8219 const unsigned char *addr, u16 vid,
8220 u16 flags)
8221 {
8222 struct i40e_netdev_priv *np = netdev_priv(dev);
8223 struct i40e_pf *pf = np->vsi->back;
8224 int err = 0;
8225
8226 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8227 return -EOPNOTSUPP;
8228
8229 if (vid) {
8230 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8231 return -EINVAL;
8232 }
8233
8234 	/* Hardware does not support aging addresses, so if an
8235 	 * ndm_state is given, only allow permanent addresses
8236 */
8237 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8238 netdev_info(dev, "FDB only supports static addresses\n");
8239 return -EINVAL;
8240 }
8241
8242 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8243 err = dev_uc_add_excl(dev, addr);
8244 else if (is_multicast_ether_addr(addr))
8245 err = dev_mc_add_excl(dev, addr);
8246 else
8247 err = -EINVAL;
8248
8249 /* Only return duplicate errors if NLM_F_EXCL is set */
8250 if (err == -EEXIST && !(flags & NLM_F_EXCL))
8251 err = 0;
8252
8253 return err;
8254 }
8255
8256 /**
8257 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8258 * @dev: the netdev being configured
8259 * @nlh: RTNL message
8260 *
8261 * Inserts a new hardware bridge if not already created and
8262 * enables the bridging mode requested (VEB or VEPA). If the
8263 * hardware bridge has already been inserted and the request
8264  * is to change the mode, then a PF reset is required to allow
8265  * a rebuild of the components with the requested hardware
8266  * bridge mode enabled.
8267 **/
8268 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8269 struct nlmsghdr *nlh,
8270 u16 flags)
8271 {
8272 struct i40e_netdev_priv *np = netdev_priv(dev);
8273 struct i40e_vsi *vsi = np->vsi;
8274 struct i40e_pf *pf = vsi->back;
8275 struct i40e_veb *veb = NULL;
8276 struct nlattr *attr, *br_spec;
8277 int i, rem;
8278
8279 /* Only for PF VSI for now */
8280 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8281 return -EOPNOTSUPP;
8282
8283 /* Find the HW bridge for PF VSI */
8284 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8285 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8286 veb = pf->veb[i];
8287 }
8288
8289 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8290
8291 nla_for_each_nested(attr, br_spec, rem) {
8292 __u16 mode;
8293
8294 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8295 continue;
8296
8297 mode = nla_get_u16(attr);
8298 if ((mode != BRIDGE_MODE_VEPA) &&
8299 (mode != BRIDGE_MODE_VEB))
8300 return -EINVAL;
8301
8302 /* Insert a new HW bridge */
8303 if (!veb) {
8304 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8305 vsi->tc_config.enabled_tc);
8306 if (veb) {
8307 veb->bridge_mode = mode;
8308 i40e_config_bridge_mode(veb);
8309 } else {
8310 /* No Bridge HW offload available */
8311 return -ENOENT;
8312 }
8313 break;
8314 } else if (mode != veb->bridge_mode) {
8315 /* Existing HW bridge but different mode needs reset */
8316 veb->bridge_mode = mode;
8317 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8318 if (mode == BRIDGE_MODE_VEB)
8319 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8320 else
8321 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8322 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8323 break;
8324 }
8325 }
8326
8327 return 0;
8328 }
8329
8330 /**
8331 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8332 * @skb: skb buff
8333 * @pid: process id
8334 * @seq: RTNL message seq #
8335 * @dev: the netdev being configured
8336 * @filter_mask: unused
8337 *
8338  * Return the mode in which the hardware bridge is operating,
8339  * i.e. VEB or VEPA.
8340 **/
8341 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8342 struct net_device *dev,
8343 u32 filter_mask, int nlflags)
8344 {
8345 struct i40e_netdev_priv *np = netdev_priv(dev);
8346 struct i40e_vsi *vsi = np->vsi;
8347 struct i40e_pf *pf = vsi->back;
8348 struct i40e_veb *veb = NULL;
8349 int i;
8350
8351 /* Only for PF VSI for now */
8352 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8353 return -EOPNOTSUPP;
8354
8355 /* Find the HW bridge for the PF VSI */
8356 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8357 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8358 veb = pf->veb[i];
8359 }
8360
8361 if (!veb)
8362 return 0;
8363
8364 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8365 nlflags, 0, 0, filter_mask, NULL);
8366 }
8367
8368 #define I40E_MAX_TUNNEL_HDR_LEN 80
8369 /**
8370 * i40e_features_check - Validate encapsulated packet conforms to limits
8371 * @skb: skb buff
8372  * @dev: This physical port's netdev
8373 * @features: Offload features that the stack believes apply
8374 **/
8375 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8376 struct net_device *dev,
8377 netdev_features_t features)
8378 {
8379 if (skb->encapsulation &&
8380 (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8381 I40E_MAX_TUNNEL_HDR_LEN))
8382 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8383
8384 return features;
8385 }
8386
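/* Worked example for the length check above (typical VXLAN layout
 * assumed): skb_inner_mac_header() - skb_transport_header() spans the
 * outer UDP header (8 bytes) plus the VXLAN header (8 bytes) = 16
 * bytes, comfortably under I40E_MAX_TUNNEL_HDR_LEN (80). Only unusually
 * deep encapsulations exceed 80 bytes and lose checksum/GSO offload.
 */
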
8387 static const struct net_device_ops i40e_netdev_ops = {
8388 .ndo_open = i40e_open,
8389 .ndo_stop = i40e_close,
8390 .ndo_start_xmit = i40e_lan_xmit_frame,
8391 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
8392 .ndo_set_rx_mode = i40e_set_rx_mode,
8393 .ndo_validate_addr = eth_validate_addr,
8394 .ndo_set_mac_address = i40e_set_mac,
8395 .ndo_change_mtu = i40e_change_mtu,
8396 .ndo_do_ioctl = i40e_ioctl,
8397 .ndo_tx_timeout = i40e_tx_timeout,
8398 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
8399 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
8400 #ifdef CONFIG_NET_POLL_CONTROLLER
8401 .ndo_poll_controller = i40e_netpoll,
8402 #endif
8403 .ndo_setup_tc = i40e_setup_tc,
8404 #ifdef I40E_FCOE
8405 .ndo_fcoe_enable = i40e_fcoe_enable,
8406 .ndo_fcoe_disable = i40e_fcoe_disable,
8407 #endif
8408 .ndo_set_features = i40e_set_features,
8409 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
8410 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
8411 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
8412 .ndo_get_vf_config = i40e_ndo_get_vf_config,
8413 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
8414 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
8415 #ifdef CONFIG_I40E_VXLAN
8416 .ndo_add_vxlan_port = i40e_add_vxlan_port,
8417 .ndo_del_vxlan_port = i40e_del_vxlan_port,
8418 #endif
8419 .ndo_get_phys_port_id = i40e_get_phys_port_id,
8420 .ndo_fdb_add = i40e_ndo_fdb_add,
8421 .ndo_features_check = i40e_features_check,
8422 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
8423 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
8424 };
8425
8426 /**
8427 * i40e_config_netdev - Setup the netdev flags
8428 * @vsi: the VSI being configured
8429 *
8430 * Returns 0 on success, negative value on failure
8431 **/
8432 static int i40e_config_netdev(struct i40e_vsi *vsi)
8433 {
8434 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
8435 struct i40e_pf *pf = vsi->back;
8436 struct i40e_hw *hw = &pf->hw;
8437 struct i40e_netdev_priv *np;
8438 struct net_device *netdev;
8439 u8 mac_addr[ETH_ALEN];
8440 int etherdev_size;
8441
8442 etherdev_size = sizeof(struct i40e_netdev_priv);
8443 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
8444 if (!netdev)
8445 return -ENOMEM;
8446
8447 vsi->netdev = netdev;
8448 np = netdev_priv(netdev);
8449 np->vsi = vsi;
8450
8451 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
8452 NETIF_F_GSO_UDP_TUNNEL |
8453 NETIF_F_TSO;
8454
8455 netdev->features = NETIF_F_SG |
8456 NETIF_F_IP_CSUM |
8457 NETIF_F_SCTP_CSUM |
8458 NETIF_F_HIGHDMA |
8459 NETIF_F_GSO_UDP_TUNNEL |
8460 NETIF_F_HW_VLAN_CTAG_TX |
8461 NETIF_F_HW_VLAN_CTAG_RX |
8462 NETIF_F_HW_VLAN_CTAG_FILTER |
8463 NETIF_F_IPV6_CSUM |
8464 NETIF_F_TSO |
8465 NETIF_F_TSO_ECN |
8466 NETIF_F_TSO6 |
8467 NETIF_F_RXCSUM |
8468 NETIF_F_RXHASH |
8469 0;
8470
8471 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
8472 netdev->features |= NETIF_F_NTUPLE;
8473
8474 /* copy netdev features into list of user selectable features */
8475 netdev->hw_features |= netdev->features;
8476
8477 if (vsi->type == I40E_VSI_MAIN) {
8478 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
8479 ether_addr_copy(mac_addr, hw->mac.perm_addr);
8480 /* The following steps are necessary to prevent reception
8481 * of tagged packets - some older NVM configurations load a
8482 		 * default MAC-VLAN filter that accepts any tagged packet,
8483 		 * which must be replaced by a normal filter.
8484 */
8485 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
8486 i40e_add_filter(vsi, mac_addr,
8487 I40E_VLAN_ANY, false, true);
8488 } else {
8489 /* relate the VSI_VMDQ name to the VSI_MAIN name */
8490 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
8491 pf->vsi[pf->lan_vsi]->netdev->name);
8492 random_ether_addr(mac_addr);
8493 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
8494 }
8495 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
8496
8497 ether_addr_copy(netdev->dev_addr, mac_addr);
8498 ether_addr_copy(netdev->perm_addr, mac_addr);
8499 /* vlan gets same features (except vlan offload)
8500 * after any tweaks for specific VSI types
8501 */
8502 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
8503 NETIF_F_HW_VLAN_CTAG_RX |
8504 NETIF_F_HW_VLAN_CTAG_FILTER);
8505 netdev->priv_flags |= IFF_UNICAST_FLT;
8506 netdev->priv_flags |= IFF_SUPP_NOFCS;
8507 /* Setup netdev TC information */
8508 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
8509
8510 netdev->netdev_ops = &i40e_netdev_ops;
8511 netdev->watchdog_timeo = 5 * HZ;
8512 i40e_set_ethtool_ops(netdev);
8513 #ifdef I40E_FCOE
8514 i40e_fcoe_config_netdev(netdev, vsi);
8515 #endif
8516
8517 return 0;
8518 }
8519
8520 /**
8521 * i40e_vsi_delete - Delete a VSI from the switch
8522 * @vsi: the VSI being removed
8523 *
8524 * Returns 0 on success, negative value on failure
8525 **/
8526 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8527 {
8528 	/* removing the default VSI is not allowed */
8529 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8530 return;
8531
8532 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8533 }
8534
8535 /**
8536 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8537 * @vsi: the VSI being queried
8538 *
8539  * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
8540 **/
8541 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8542 {
8543 struct i40e_veb *veb;
8544 struct i40e_pf *pf = vsi->back;
8545
8546 /* Uplink is not a bridge so default to VEB */
8547 if (vsi->veb_idx == I40E_NO_VEB)
8548 return 1;
8549
8550 veb = pf->veb[vsi->veb_idx];
8551 /* Uplink is a bridge in VEPA mode */
8552 	if (veb && (veb->bridge_mode == BRIDGE_MODE_VEPA))
8553 return 0;
8554
8555 /* Uplink is a bridge in VEB mode */
8556 return 1;
8557 }
8558
8559 /**
8560 * i40e_add_vsi - Add a VSI to the switch
8561 * @vsi: the VSI being configured
8562 *
8563 * This initializes a VSI context depending on the VSI type to be added and
8564 * passes it down to the add_vsi aq command.
8565 **/
8566 static int i40e_add_vsi(struct i40e_vsi *vsi)
8567 {
8568 int ret = -ENODEV;
8569 struct i40e_mac_filter *f, *ftmp;
8570 struct i40e_pf *pf = vsi->back;
8571 struct i40e_hw *hw = &pf->hw;
8572 struct i40e_vsi_context ctxt;
8573 u8 enabled_tc = 0x1; /* TC0 enabled */
8574 int f_count = 0;
8575
8576 memset(&ctxt, 0, sizeof(ctxt));
8577 switch (vsi->type) {
8578 case I40E_VSI_MAIN:
8579 		/* The PF's main VSI is already set up as part of the
8580 * device initialization, so we'll not bother with
8581 * the add_vsi call, but we will retrieve the current
8582 * VSI context.
8583 */
8584 ctxt.seid = pf->main_vsi_seid;
8585 ctxt.pf_num = pf->hw.pf_id;
8586 ctxt.vf_num = 0;
8587 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8588 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8589 if (ret) {
8590 dev_info(&pf->pdev->dev,
8591 "couldn't get PF vsi config, err %s aq_err %s\n",
8592 i40e_stat_str(&pf->hw, ret),
8593 i40e_aq_str(&pf->hw,
8594 pf->hw.aq.asq_last_status));
8595 return -ENOENT;
8596 }
8597 vsi->info = ctxt.info;
8598 vsi->info.valid_sections = 0;
8599
8600 vsi->seid = ctxt.seid;
8601 vsi->id = ctxt.vsi_number;
8602
8603 enabled_tc = i40e_pf_get_tc_map(pf);
8604
8605 /* MFP mode setup queue map and update VSI */
8606 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
8607 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
8608 memset(&ctxt, 0, sizeof(ctxt));
8609 ctxt.seid = pf->main_vsi_seid;
8610 ctxt.pf_num = pf->hw.pf_id;
8611 ctxt.vf_num = 0;
8612 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
8613 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8614 if (ret) {
8615 dev_info(&pf->pdev->dev,
8616 "update vsi failed, err %s aq_err %s\n",
8617 i40e_stat_str(&pf->hw, ret),
8618 i40e_aq_str(&pf->hw,
8619 pf->hw.aq.asq_last_status));
8620 ret = -ENOENT;
8621 goto err;
8622 }
8623 /* update the local VSI info queue map */
8624 i40e_vsi_update_queue_map(vsi, &ctxt);
8625 vsi->info.valid_sections = 0;
8626 } else {
8627 			/* The Default/Main VSI is only enabled for TC0;
8628 * reconfigure it to enable all TCs that are
8629 * available on the port in SFP mode.
8630 * For MFP case the iSCSI PF would use this
8631 * flow to enable LAN+iSCSI TC.
8632 */
8633 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8634 if (ret) {
8635 dev_info(&pf->pdev->dev,
8636 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
8637 enabled_tc,
8638 i40e_stat_str(&pf->hw, ret),
8639 i40e_aq_str(&pf->hw,
8640 pf->hw.aq.asq_last_status));
8641 ret = -ENOENT;
8642 }
8643 }
8644 break;
8645
8646 case I40E_VSI_FDIR:
8647 ctxt.pf_num = hw->pf_id;
8648 ctxt.vf_num = 0;
8649 ctxt.uplink_seid = vsi->uplink_seid;
8650 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8651 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8652 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8653 (i40e_is_vsi_uplink_mode_veb(vsi))) {
8654 ctxt.info.valid_sections |=
8655 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8656 ctxt.info.switch_id =
8657 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8658 }
8659 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8660 break;
8661
8662 case I40E_VSI_VMDQ2:
8663 ctxt.pf_num = hw->pf_id;
8664 ctxt.vf_num = 0;
8665 ctxt.uplink_seid = vsi->uplink_seid;
8666 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8667 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
8668
8669 /* This VSI is connected to VEB so the switch_id
8670 * should be set to zero by default.
8671 */
8672 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8673 ctxt.info.valid_sections |=
8674 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8675 ctxt.info.switch_id =
8676 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8677 }
8678
8679 /* Setup the VSI tx/rx queue map for TC0 only for now */
8680 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8681 break;
8682
8683 case I40E_VSI_SRIOV:
8684 ctxt.pf_num = hw->pf_id;
8685 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
8686 ctxt.uplink_seid = vsi->uplink_seid;
8687 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8688 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
8689
8690 /* This VSI is connected to VEB so the switch_id
8691 * should be set to zero by default.
8692 */
8693 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8694 ctxt.info.valid_sections |=
8695 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8696 ctxt.info.switch_id =
8697 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8698 }
8699
8700 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
8701 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
8702 if (pf->vf[vsi->vf_id].spoofchk) {
8703 ctxt.info.valid_sections |=
8704 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
8705 ctxt.info.sec_flags |=
8706 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
8707 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
8708 }
8709 /* Setup the VSI tx/rx queue map for TC0 only for now */
8710 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8711 break;
8712
8713 #ifdef I40E_FCOE
8714 case I40E_VSI_FCOE:
8715 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
8716 if (ret) {
8717 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
8718 return ret;
8719 }
8720 break;
8721
8722 #endif /* I40E_FCOE */
8723 default:
8724 return -ENODEV;
8725 }
8726
8727 if (vsi->type != I40E_VSI_MAIN) {
8728 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
8729 if (ret) {
8730 dev_info(&vsi->back->pdev->dev,
8731 "add vsi failed, err %s aq_err %s\n",
8732 i40e_stat_str(&pf->hw, ret),
8733 i40e_aq_str(&pf->hw,
8734 pf->hw.aq.asq_last_status));
8735 ret = -ENOENT;
8736 goto err;
8737 }
8738 vsi->info = ctxt.info;
8739 vsi->info.valid_sections = 0;
8740 vsi->seid = ctxt.seid;
8741 vsi->id = ctxt.vsi_number;
8742 }
8743
8744 /* If macvlan filters already exist, force them to get loaded */
8745 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
8746 f->changed = true;
8747 f_count++;
8748
8749 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
8750 struct i40e_aqc_remove_macvlan_element_data element;
8751
8752 memset(&element, 0, sizeof(element));
8753 ether_addr_copy(element.mac_addr, f->macaddr);
8754 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
8755 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
8756 &element, 1, NULL);
8757 if (ret) {
8758 /* some older FW has a different default */
8759 element.flags |=
8760 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
8761 i40e_aq_remove_macvlan(hw, vsi->seid,
8762 &element, 1, NULL);
8763 }
8764
8765 i40e_aq_mac_address_write(hw,
8766 I40E_AQC_WRITE_TYPE_LAA_WOL,
8767 f->macaddr, NULL);
8768 }
8769 }
8770 if (f_count) {
8771 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
8772 pf->flags |= I40E_FLAG_FILTER_SYNC;
8773 }
8774
8775 /* Update VSI BW information */
8776 ret = i40e_vsi_get_bw_info(vsi);
8777 if (ret) {
8778 dev_info(&pf->pdev->dev,
8779 "couldn't get vsi bw info, err %s aq_err %s\n",
8780 i40e_stat_str(&pf->hw, ret),
8781 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8782 /* VSI is already added so not tearing that up */
8783 ret = 0;
8784 }
8785
8786 err:
8787 return ret;
8788 }
8789
8790 /**
8791 * i40e_vsi_release - Delete a VSI and free its resources
8792 * @vsi: the VSI being removed
8793 *
8794 * Returns 0 on success or < 0 on error
8795 **/
8796 int i40e_vsi_release(struct i40e_vsi *vsi)
8797 {
8798 struct i40e_mac_filter *f, *ftmp;
8799 struct i40e_veb *veb = NULL;
8800 struct i40e_pf *pf;
8801 u16 uplink_seid;
8802 int i, n;
8803
8804 pf = vsi->back;
8805
8806 /* release of a VEB-owner or last VSI is not allowed */
8807 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8808 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8809 vsi->seid, vsi->uplink_seid);
8810 return -ENODEV;
8811 }
8812 if (vsi == pf->vsi[pf->lan_vsi] &&
8813 !test_bit(__I40E_DOWN, &pf->state)) {
8814 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8815 return -ENODEV;
8816 }
8817
8818 uplink_seid = vsi->uplink_seid;
8819 if (vsi->type != I40E_VSI_SRIOV) {
8820 if (vsi->netdev_registered) {
8821 vsi->netdev_registered = false;
8822 if (vsi->netdev) {
8823 /* results in a call to i40e_close() */
8824 unregister_netdev(vsi->netdev);
8825 }
8826 } else {
8827 i40e_vsi_close(vsi);
8828 }
8829 i40e_vsi_disable_irq(vsi);
8830 }
8831
8832 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8833 i40e_del_filter(vsi, f->macaddr, f->vlan,
8834 f->is_vf, f->is_netdev);
8835 i40e_sync_vsi_filters(vsi, false);
8836
8837 i40e_vsi_delete(vsi);
8838 i40e_vsi_free_q_vectors(vsi);
8839 if (vsi->netdev) {
8840 free_netdev(vsi->netdev);
8841 vsi->netdev = NULL;
8842 }
8843 i40e_vsi_clear_rings(vsi);
8844 i40e_vsi_clear(vsi);
8845
8846 /* If this was the last thing on the VEB, except for the
8847 * controlling VSI, remove the VEB, which puts the controlling
8848 * VSI onto the next level down in the switch.
8849 *
8850 * Well, okay, there's one more exception here: don't remove
8851 * the orphan VEBs yet. We'll wait for an explicit remove request
8852 * from up the network stack.
8853 */
8854 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
8855 if (pf->vsi[i] &&
8856 pf->vsi[i]->uplink_seid == uplink_seid &&
8857 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8858 n++; /* count the VSIs */
8859 }
8860 }
8861 for (i = 0; i < I40E_MAX_VEB; i++) {
8862 if (!pf->veb[i])
8863 continue;
8864 if (pf->veb[i]->uplink_seid == uplink_seid)
8865 n++; /* count the VEBs */
8866 if (pf->veb[i]->seid == uplink_seid)
8867 veb = pf->veb[i];
8868 }
8869 if (n == 0 && veb && veb->uplink_seid != 0)
8870 i40e_veb_release(veb);
8871
8872 return 0;
8873 }
8874
8875 /**
8876 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8877 * @vsi: ptr to the VSI
8878 *
8879 * This should only be called after i40e_vsi_mem_alloc() which allocates the
8880 * corresponding SW VSI structure and initializes num_queue_pairs for the
8881 * newly allocated VSI.
8882 *
8883 * Returns 0 on success or negative on failure
8884 **/
8885 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8886 {
8887 int ret = -ENOENT;
8888 struct i40e_pf *pf = vsi->back;
8889
8890 if (vsi->q_vectors[0]) {
8891 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8892 vsi->seid);
8893 return -EEXIST;
8894 }
8895
8896 if (vsi->base_vector) {
8897 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8898 vsi->seid, vsi->base_vector);
8899 return -EEXIST;
8900 }
8901
8902 ret = i40e_vsi_alloc_q_vectors(vsi);
8903 if (ret) {
8904 dev_info(&pf->pdev->dev,
8905 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8906 vsi->num_q_vectors, vsi->seid, ret);
8907 vsi->num_q_vectors = 0;
8908 goto vector_setup_out;
8909 }
8910
8911 /* In Legacy mode, we do not have to get any other vector since we
8912 * piggyback on the misc/ICR0 for queue interrupts.
8913 */
8914 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8915 return ret;
8916 if (vsi->num_q_vectors)
8917 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8918 vsi->num_q_vectors, vsi->idx);
8919 if (vsi->base_vector < 0) {
8920 dev_info(&pf->pdev->dev,
8921 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8922 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8923 i40e_vsi_free_q_vectors(vsi);
8924 ret = -ENOENT;
8925 goto vector_setup_out;
8926 }
8927
8928 vector_setup_out:
8929 return ret;
8930 }
8931
8932 /**
8933  * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
8934 * @vsi: pointer to the vsi.
8935 *
8936 * This re-allocates a vsi's queue resources.
8937 *
8938 * Returns pointer to the successfully allocated and configured VSI sw struct
8939 * on success, otherwise returns NULL on failure.
8940 **/
8941 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8942 {
8943 struct i40e_pf *pf = vsi->back;
8944 u8 enabled_tc;
8945 int ret;
8946
8947 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8948 i40e_vsi_clear_rings(vsi);
8949
8950 i40e_vsi_free_arrays(vsi, false);
8951 i40e_set_num_rings_in_vsi(vsi);
8952 ret = i40e_vsi_alloc_arrays(vsi, false);
8953 if (ret)
8954 goto err_vsi;
8955
8956 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8957 if (ret < 0) {
8958 dev_info(&pf->pdev->dev,
8959 "failed to get tracking for %d queues for VSI %d err %d\n",
8960 vsi->alloc_queue_pairs, vsi->seid, ret);
8961 goto err_vsi;
8962 }
8963 vsi->base_queue = ret;
8964
8965 /* Update the FW view of the VSI. Force a reset of TC and queue
8966 * layout configurations.
8967 */
8968 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8969 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8970 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8971 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8972
8973 /* assign it some queues */
8974 ret = i40e_alloc_rings(vsi);
8975 if (ret)
8976 goto err_rings;
8977
8978 /* map all of the rings to the q_vectors */
8979 i40e_vsi_map_rings_to_vectors(vsi);
8980 return vsi;
8981
8982 err_rings:
8983 i40e_vsi_free_q_vectors(vsi);
8984 if (vsi->netdev_registered) {
8985 vsi->netdev_registered = false;
8986 unregister_netdev(vsi->netdev);
8987 free_netdev(vsi->netdev);
8988 vsi->netdev = NULL;
8989 }
8990 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8991 err_vsi:
8992 i40e_vsi_clear(vsi);
8993 return NULL;
8994 }
8995
8996 /**
8997 * i40e_vsi_setup - Set up a VSI by a given type
8998 * @pf: board private structure
8999 * @type: VSI type
9000 * @uplink_seid: the switch element to link to
9001 * @param1: usage depends upon VSI type. For VF types, indicates VF id
9002 *
9003  * This allocates the sw VSI structure and its queue resources, then adds a VSI
9004 * to the identified VEB.
9005 *
9006  * Returns pointer to the successfully allocated and configured VSI sw struct on
9007 * success, otherwise returns NULL on failure.
9008 **/
9009 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
9010 u16 uplink_seid, u32 param1)
9011 {
9012 struct i40e_vsi *vsi = NULL;
9013 struct i40e_veb *veb = NULL;
9014 int ret, i;
9015 int v_idx;
9016
9017 /* The requested uplink_seid must be either
9018 * - the PF's port seid
9019 * no VEB is needed because this is the PF
9020 * or this is a Flow Director special case VSI
9021 * - seid of an existing VEB
9022 * - seid of a VSI that owns an existing VEB
9023 * - seid of a VSI that doesn't own a VEB
9024 * a new VEB is created and the VSI becomes the owner
9025 * - seid of the PF VSI, which is what creates the first VEB
9026 * this is a special case of the previous
9027 *
9028 * Find which uplink_seid we were given and create a new VEB if needed
9029 */
9030 for (i = 0; i < I40E_MAX_VEB; i++) {
9031 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
9032 veb = pf->veb[i];
9033 break;
9034 }
9035 }
9036
9037 if (!veb && uplink_seid != pf->mac_seid) {
9038
9039 for (i = 0; i < pf->num_alloc_vsi; i++) {
9040 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
9041 vsi = pf->vsi[i];
9042 break;
9043 }
9044 }
9045 if (!vsi) {
9046 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
9047 uplink_seid);
9048 return NULL;
9049 }
9050
9051 if (vsi->uplink_seid == pf->mac_seid)
9052 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
9053 vsi->tc_config.enabled_tc);
9054 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
9055 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
9056 vsi->tc_config.enabled_tc);
9057 if (veb) {
9058 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
9059 dev_info(&vsi->back->pdev->dev,
9060 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
9061 __func__);
9062 return NULL;
9063 }
9064 /* We come up by default in VEPA mode if SRIOV is not
9065 * already enabled, in which case we can't force VEPA
9066 * mode.
9067 */
9068 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
9069 veb->bridge_mode = BRIDGE_MODE_VEPA;
9070 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
9071 }
9072 i40e_config_bridge_mode(veb);
9073 }
9074 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
9075 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
9076 veb = pf->veb[i];
9077 }
9078 if (!veb) {
9079 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
9080 return NULL;
9081 }
9082
9083 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9084 uplink_seid = veb->seid;
9085 }
9086
9087 /* get vsi sw struct */
9088 v_idx = i40e_vsi_mem_alloc(pf, type);
9089 if (v_idx < 0)
9090 goto err_alloc;
9091 vsi = pf->vsi[v_idx];
9092 if (!vsi)
9093 goto err_alloc;
9094 vsi->type = type;
9095 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
9096
9097 if (type == I40E_VSI_MAIN)
9098 pf->lan_vsi = v_idx;
9099 else if (type == I40E_VSI_SRIOV)
9100 vsi->vf_id = param1;
9101 /* assign it some queues */
9102 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
9103 vsi->idx);
9104 if (ret < 0) {
9105 dev_info(&pf->pdev->dev,
9106 "failed to get tracking for %d queues for VSI %d err=%d\n",
9107 vsi->alloc_queue_pairs, vsi->seid, ret);
9108 goto err_vsi;
9109 }
9110 vsi->base_queue = ret;
9111
9112 /* get a VSI from the hardware */
9113 vsi->uplink_seid = uplink_seid;
9114 ret = i40e_add_vsi(vsi);
9115 if (ret)
9116 goto err_vsi;
9117
9118 switch (vsi->type) {
9119 /* setup the netdev if needed */
9120 case I40E_VSI_MAIN:
9121 case I40E_VSI_VMDQ2:
9122 case I40E_VSI_FCOE:
9123 ret = i40e_config_netdev(vsi);
9124 if (ret)
9125 goto err_netdev;
9126 ret = register_netdev(vsi->netdev);
9127 if (ret)
9128 goto err_netdev;
9129 vsi->netdev_registered = true;
9130 netif_carrier_off(vsi->netdev);
9131 #ifdef CONFIG_I40E_DCB
9132 /* Setup DCB netlink interface */
9133 i40e_dcbnl_setup(vsi);
9134 #endif /* CONFIG_I40E_DCB */
9135 /* fall through */
9136
9137 case I40E_VSI_FDIR:
9138 /* set up vectors and rings if needed */
9139 ret = i40e_vsi_setup_vectors(vsi);
9140 if (ret)
9141 goto err_msix;
9142
9143 ret = i40e_alloc_rings(vsi);
9144 if (ret)
9145 goto err_rings;
9146
9147 /* map all of the rings to the q_vectors */
9148 i40e_vsi_map_rings_to_vectors(vsi);
9149
9150 i40e_vsi_reset_stats(vsi);
9151 break;
9152
9153 default:
9154 /* no netdev or rings for the other VSI types */
9155 break;
9156 }
9157
9158 if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
9159 (vsi->type == I40E_VSI_VMDQ2)) {
9160 ret = i40e_vsi_config_rss(vsi);
9161 }
9162 return vsi;
9163
9164 err_rings:
9165 i40e_vsi_free_q_vectors(vsi);
9166 err_msix:
9167 if (vsi->netdev_registered) {
9168 vsi->netdev_registered = false;
9169 unregister_netdev(vsi->netdev);
9170 free_netdev(vsi->netdev);
9171 vsi->netdev = NULL;
9172 }
9173 err_netdev:
9174 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
9175 err_vsi:
9176 i40e_vsi_clear(vsi);
9177 err_alloc:
9178 return NULL;
9179 }
9180
9181 /**
9182 * i40e_veb_get_bw_info - Query VEB BW information
9183 * @veb: the veb to query
9184 *
9185 * Query the Tx scheduler BW configuration data for given VEB
9186 **/
9187 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
9188 {
9189 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
9190 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
9191 struct i40e_pf *pf = veb->pf;
9192 struct i40e_hw *hw = &pf->hw;
9193 u32 tc_bw_max;
9194 int ret = 0;
9195 int i;
9196
9197 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9198 &bw_data, NULL);
9199 if (ret) {
9200 dev_info(&pf->pdev->dev,
9201 "query veb bw config failed, err %s aq_err %s\n",
9202 i40e_stat_str(&pf->hw, ret),
9203 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9204 goto out;
9205 }
9206
9207 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9208 &ets_data, NULL);
9209 if (ret) {
9210 dev_info(&pf->pdev->dev,
9211 "query veb bw ets config failed, err %s aq_err %s\n",
9212 i40e_stat_str(&pf->hw, ret),
9213 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
9214 goto out;
9215 }
9216
9217 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
9218 veb->bw_max_quanta = ets_data.tc_bw_max;
9219 veb->is_abs_credits = bw_data.absolute_credits_enable;
9220 veb->enabled_tc = ets_data.tc_valid_bits;
9221 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
9222 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
9223 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9224 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
9225 veb->bw_tc_limit_credits[i] =
9226 le16_to_cpu(bw_data.tc_bw_limits[i]);
9227 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
9228 }
9229
9230 out:
9231 return ret;
9232 }
9233
9234 /**
9235 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9236 * @pf: board private structure
9237 *
9238 * On error: returns error code (negative)
9239  * On success: returns the VEB index in the PF (positive)
9240 **/
9241 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9242 {
9243 int ret = -ENOENT;
9244 struct i40e_veb *veb;
9245 int i;
9246
9247 /* Need to protect the allocation of switch elements at the PF level */
9248 mutex_lock(&pf->switch_mutex);
9249
9250 /* VEB list may be fragmented if VEB creation/destruction has
9251 * been happening. We can afford to do a quick scan to look
9252 * for any free slots in the list.
9253 *
9254 * find next empty veb slot, looping back around if necessary
9255 */
9256 i = 0;
9257 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9258 i++;
9259 if (i >= I40E_MAX_VEB) {
9260 ret = -ENOMEM;
9261 goto err_alloc_veb; /* out of VEB slots! */
9262 }
9263
9264 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9265 if (!veb) {
9266 ret = -ENOMEM;
9267 goto err_alloc_veb;
9268 }
9269 veb->pf = pf;
9270 veb->idx = i;
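/* start with only TC0 enabled until the caller configures the TC bitmap */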
9271 veb->enabled_tc = 1;
9272
9273 pf->veb[i] = veb;
9274 ret = i;
9275 err_alloc_veb:
9276 mutex_unlock(&pf->switch_mutex);
9277 return ret;
9278 }
9279
9280 /**
9281 * i40e_switch_branch_release - Delete a branch of the switch tree
9282 * @branch: where to start deleting
9283 *
9284 * This uses recursion to find the tips of the branch to be
9285  * removed, deleting elements until we get back to this VEB and can delete it.
9286 **/
9287 static void i40e_switch_branch_release(struct i40e_veb *branch)
9288 {
9289 struct i40e_pf *pf = branch->pf;
9290 u16 branch_seid = branch->seid;
9291 u16 veb_idx = branch->idx;
9292 int i;
9293
9294 /* release any VEBs on this VEB - RECURSION */
9295 for (i = 0; i < I40E_MAX_VEB; i++) {
9296 if (!pf->veb[i])
9297 continue;
9298 if (pf->veb[i]->uplink_seid == branch->seid)
9299 i40e_switch_branch_release(pf->veb[i]);
9300 }
9301
9302 /* Release the VSIs on this VEB, but not the owner VSI.
9303 *
9304 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
9305 * the VEB itself, so don't use (*branch) after this loop.
9306 */
9307 for (i = 0; i < pf->num_alloc_vsi; i++) {
9308 if (!pf->vsi[i])
9309 continue;
9310 if (pf->vsi[i]->uplink_seid == branch_seid &&
9311 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9312 i40e_vsi_release(pf->vsi[i]);
9313 }
9314 }
9315
9316 /* There's one corner case where the VEB might not have been
9317 * removed, so double check it here and remove it if needed.
9318 * This case happens if the veb was created from the debugfs
9319 * commands and no VSIs were added to it.
9320 */
9321 if (pf->veb[veb_idx])
9322 i40e_veb_release(pf->veb[veb_idx]);
9323 }
9324
9325 /**
9326 * i40e_veb_clear - remove veb struct
9327 * @veb: the veb to remove
9328 **/
9329 static void i40e_veb_clear(struct i40e_veb *veb)
9330 {
9331 if (!veb)
9332 return;
9333
9334 if (veb->pf) {
9335 struct i40e_pf *pf = veb->pf;
9336
9337 mutex_lock(&pf->switch_mutex);
9338 if (pf->veb[veb->idx] == veb)
9339 pf->veb[veb->idx] = NULL;
9340 mutex_unlock(&pf->switch_mutex);
9341 }
9342
9343 kfree(veb);
9344 }
9345
9346 /**
9347 * i40e_veb_release - Delete a VEB and free its resources
9348 * @veb: the VEB being removed
9349 **/
9350 void i40e_veb_release(struct i40e_veb *veb)
9351 {
9352 struct i40e_vsi *vsi = NULL;
9353 struct i40e_pf *pf;
9354 int i, n = 0;
9355
9356 pf = veb->pf;
9357
9358 /* find the remaining VSI and check for extras */
9359 for (i = 0; i < pf->num_alloc_vsi; i++) {
9360 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9361 n++;
9362 vsi = pf->vsi[i];
9363 }
9364 }
9365 if (n != 1) {
9366 dev_info(&pf->pdev->dev,
9367 "can't remove VEB %d with %d VSIs left\n",
9368 veb->seid, n);
9369 return;
9370 }
9371
9372 /* move the remaining VSI to uplink veb */
9373 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9374 if (veb->uplink_seid) {
9375 vsi->uplink_seid = veb->uplink_seid;
9376 if (veb->uplink_seid == pf->mac_seid)
9377 vsi->veb_idx = I40E_NO_VEB;
9378 else
9379 vsi->veb_idx = veb->veb_idx;
9380 } else {
9381 /* floating VEB */
9382 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9383 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9384 }
9385
9386 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9387 i40e_veb_clear(veb);
9388 }
9389
9390 /**
9391 * i40e_add_veb - create the VEB in the switch
9392 * @veb: the VEB to be instantiated
9393 * @vsi: the controlling VSI
9394 **/
9395 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9396 {
9397 struct i40e_pf *pf = veb->pf;
9398 bool is_default = veb->pf->cur_promisc;
9399 bool is_cloud = false;
9400 int ret;
9401
9402 /* get a VEB from the hardware */
9403 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
9404 veb->enabled_tc, is_default,
9405 is_cloud, &veb->seid, NULL);
9406 if (ret) {
9407 dev_info(&pf->pdev->dev,
9408 "couldn't add VEB, err %s aq_err %s\n",
9409 i40e_stat_str(&pf->hw, ret),
9410 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9411 return -EPERM;
9412 }
9413
9414 /* get statistics counter */
9415 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
9416 &veb->stats_idx, NULL, NULL, NULL);
9417 if (ret) {
9418 dev_info(&pf->pdev->dev,
9419 "couldn't get VEB statistics idx, err %s aq_err %s\n",
9420 i40e_stat_str(&pf->hw, ret),
9421 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9422 return -EPERM;
9423 }
9424 ret = i40e_veb_get_bw_info(veb);
9425 if (ret) {
9426 dev_info(&pf->pdev->dev,
9427 "couldn't get VEB bw info, err %s aq_err %s\n",
9428 i40e_stat_str(&pf->hw, ret),
9429 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9430 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9431 return -ENOENT;
9432 }
9433
9434 vsi->uplink_seid = veb->seid;
9435 vsi->veb_idx = veb->idx;
9436 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9437
9438 return 0;
9439 }
9440
9441 /**
9442 * i40e_veb_setup - Set up a VEB
9443 * @pf: board private structure
9444 * @flags: VEB setup flags
9445 * @uplink_seid: the switch element to link to
9446 * @vsi_seid: the initial VSI seid
9447 * @enabled_tc: Enabled TC bit-map
9448 *
9449  * This allocates the sw VEB structure and links it into the switch.
9450 * It is possible and legal for this to be a duplicate of an already
9451 * existing VEB. It is also possible for both uplink and vsi seids
9452 * to be zero, in order to create a floating VEB.
9453 *
9454 * Returns pointer to the successfully allocated VEB sw struct on
9455 * success, otherwise returns NULL on failure.
9456 **/
9457 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
9458 u16 uplink_seid, u16 vsi_seid,
9459 u8 enabled_tc)
9460 {
9461 struct i40e_veb *veb, *uplink_veb = NULL;
9462 int vsi_idx, veb_idx;
9463 int ret;
9464
9465 /* if one seid is 0, the other must be 0 to create a floating relay */
9466 if ((uplink_seid == 0 || vsi_seid == 0) &&
9467 (uplink_seid + vsi_seid != 0)) {
9468 dev_info(&pf->pdev->dev,
9469 "one, not both seid's are 0: uplink=%d vsi=%d\n",
9470 uplink_seid, vsi_seid);
9471 return NULL;
9472 }
9473
9474 /* make sure there is such a vsi and uplink */
9475 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
9476 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
9477 break;
9478 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
9479 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
9480 vsi_seid);
9481 return NULL;
9482 }
9483
9484 if (uplink_seid && uplink_seid != pf->mac_seid) {
9485 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9486 if (pf->veb[veb_idx] &&
9487 pf->veb[veb_idx]->seid == uplink_seid) {
9488 uplink_veb = pf->veb[veb_idx];
9489 break;
9490 }
9491 }
9492 if (!uplink_veb) {
9493 dev_info(&pf->pdev->dev,
9494 "uplink seid %d not found\n", uplink_seid);
9495 return NULL;
9496 }
9497 }
9498
9499 /* get veb sw struct */
9500 veb_idx = i40e_veb_mem_alloc(pf);
9501 if (veb_idx < 0)
9502 goto err_alloc;
9503 veb = pf->veb[veb_idx];
9504 veb->flags = flags;
9505 veb->uplink_seid = uplink_seid;
9506 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
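/* default to TC0 if the caller did not supply a TC bitmap */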
9507 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
9508
9509 /* create the VEB in the switch */
9510 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
9511 if (ret)
9512 goto err_veb;
9513 if (vsi_idx == pf->lan_vsi)
9514 pf->lan_veb = veb->idx;
9515
9516 return veb;
9517
9518 err_veb:
9519 i40e_veb_clear(veb);
9520 err_alloc:
9521 return NULL;
9522 }
9523
9524 /**
9525 * i40e_setup_pf_switch_element - set PF vars based on switch type
9526 * @pf: board private structure
9527 * @ele: element we are building info from
9528 * @num_reported: total number of elements
9529 * @printconfig: should we print the contents
9530 *
9531  * Helper function to extract a few useful SEID values.
9532 **/
9533 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
9534 struct i40e_aqc_switch_config_element_resp *ele,
9535 u16 num_reported, bool printconfig)
9536 {
9537 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
9538 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
9539 u8 element_type = ele->element_type;
9540 u16 seid = le16_to_cpu(ele->seid);
9541
9542 if (printconfig)
9543 dev_info(&pf->pdev->dev,
9544 "type=%d seid=%d uplink=%d downlink=%d\n",
9545 element_type, seid, uplink_seid, downlink_seid);
9546
9547 switch (element_type) {
9548 case I40E_SWITCH_ELEMENT_TYPE_MAC:
9549 pf->mac_seid = seid;
9550 break;
9551 case I40E_SWITCH_ELEMENT_TYPE_VEB:
9552 /* Main VEB? */
9553 if (uplink_seid != pf->mac_seid)
9554 break;
9555 if (pf->lan_veb == I40E_NO_VEB) {
9556 int v;
9557
9558 /* find existing or else empty VEB */
9559 for (v = 0; v < I40E_MAX_VEB; v++) {
9560 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
9561 pf->lan_veb = v;
9562 break;
9563 }
9564 }
9565 if (pf->lan_veb == I40E_NO_VEB) {
9566 v = i40e_veb_mem_alloc(pf);
9567 if (v < 0)
9568 break;
9569 pf->lan_veb = v;
9570 }
9571 }
9572
9573 pf->veb[pf->lan_veb]->seid = seid;
9574 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
9575 pf->veb[pf->lan_veb]->pf = pf;
9576 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
9577 break;
9578 case I40E_SWITCH_ELEMENT_TYPE_VSI:
9579 if (num_reported != 1)
9580 break;
9581 /* This is immediately after a reset so we can assume this is
9582 * the PF's VSI
9583 */
9584 pf->mac_seid = uplink_seid;
9585 pf->pf_seid = downlink_seid;
9586 pf->main_vsi_seid = seid;
9587 if (printconfig)
9588 dev_info(&pf->pdev->dev,
9589 "pf_seid=%d main_vsi_seid=%d\n",
9590 pf->pf_seid, pf->main_vsi_seid);
9591 break;
9592 case I40E_SWITCH_ELEMENT_TYPE_PF:
9593 case I40E_SWITCH_ELEMENT_TYPE_VF:
9594 case I40E_SWITCH_ELEMENT_TYPE_EMP:
9595 case I40E_SWITCH_ELEMENT_TYPE_BMC:
9596 case I40E_SWITCH_ELEMENT_TYPE_PE:
9597 case I40E_SWITCH_ELEMENT_TYPE_PA:
9598 /* ignore these for now */
9599 break;
9600 default:
9601 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
9602 element_type, seid);
9603 break;
9604 }
9605 }
9606
9607 /**
9608 * i40e_fetch_switch_configuration - Get switch config from firmware
9609 * @pf: board private structure
9610 * @printconfig: should we print the contents
9611 *
9612 * Get the current switch configuration from the device and
9613 * extract a few useful SEID values.
9614 **/
9615 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9616 {
9617 struct i40e_aqc_get_switch_config_resp *sw_config;
9618 u16 next_seid = 0;
9619 int ret = 0;
9620 u8 *aq_buf;
9621 int i;
9622
9623 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9624 if (!aq_buf)
9625 return -ENOMEM;
9626
9627 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
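/* the switch configuration can span several AQ responses; keep
 * requesting, resuming at next_seid, until the firmware reports no more
 */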
9628 do {
9629 u16 num_reported, num_total;
9630
9631 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9632 I40E_AQ_LARGE_BUF,
9633 &next_seid, NULL);
9634 if (ret) {
9635 dev_info(&pf->pdev->dev,
9636 "get switch config failed err %s aq_err %s\n",
9637 i40e_stat_str(&pf->hw, ret),
9638 i40e_aq_str(&pf->hw,
9639 pf->hw.aq.asq_last_status));
9640 kfree(aq_buf);
9641 return -ENOENT;
9642 }
9643
9644 num_reported = le16_to_cpu(sw_config->header.num_reported);
9645 num_total = le16_to_cpu(sw_config->header.num_total);
9646
9647 if (printconfig)
9648 dev_info(&pf->pdev->dev,
9649 "header: %d reported %d total\n",
9650 num_reported, num_total);
9651
9652 for (i = 0; i < num_reported; i++) {
9653 struct i40e_aqc_switch_config_element_resp *ele =
9654 &sw_config->element[i];
9655
9656 i40e_setup_pf_switch_element(pf, ele, num_reported,
9657 printconfig);
9658 }
9659 } while (next_seid != 0);
9660
9661 kfree(aq_buf);
9662 return ret;
9663 }
9664
9665 /**
9666 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9667 * @pf: board private structure
9668  * @reinit: if the Main VSI needs to be re-initialized.
9669 *
9670 * Returns 0 on success, negative value on failure
9671 **/
9672 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
9673 {
9674 int ret;
9675
9676 /* find out what's out there already */
9677 ret = i40e_fetch_switch_configuration(pf, false);
9678 if (ret) {
9679 dev_info(&pf->pdev->dev,
9680 "couldn't fetch switch config, err %s aq_err %s\n",
9681 i40e_stat_str(&pf->hw, ret),
9682 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9683 return ret;
9684 }
9685 i40e_pf_reset_stats(pf);
9686
9687 /* first time setup */
9688 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
9689 struct i40e_vsi *vsi = NULL;
9690 u16 uplink_seid;
9691
9692 /* Set up the PF VSI associated with the PF's main VSI
9693 * that is already in the HW switch
9694 */
9695 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9696 uplink_seid = pf->veb[pf->lan_veb]->seid;
9697 else
9698 uplink_seid = pf->mac_seid;
9699 if (pf->lan_vsi == I40E_NO_VSI)
9700 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
9701 else if (reinit)
9702 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
9703 if (!vsi) {
9704 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
9705 i40e_fdir_teardown(pf);
9706 return -EAGAIN;
9707 }
9708 } else {
9709 /* force a reset of TC and queue layout configurations */
9710 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9711 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9712 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9713 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9714 }
9715 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
9716
9717 i40e_fdir_sb_setup(pf);
9718
9719 /* Setup static PF queue filter control settings */
9720 ret = i40e_setup_pf_filter_control(pf);
9721 if (ret) {
9722 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
9723 ret);
9724 /* Failure here should not stop continuing other steps */
9725 }
9726
9727 /* enable RSS in the HW, even for only one queue, as the stack can use
9728 * the hash
9729 */
9730 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
9731 i40e_config_rss(pf);
9732
9733 /* fill in link information and enable LSE reporting */
9734 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
9735 i40e_link_event(pf);
9736
9737 /* Initialize user-specific link properties */
9738 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
9739 I40E_AQ_AN_COMPLETED) ? true : false);
9740
9741 i40e_ptp_init(pf);
9742
9743 return ret;
9744 }
9745
9746 /**
9747 * i40e_determine_queue_usage - Work out queue distribution
9748 * @pf: board private structure
9749 **/
9750 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9751 {
9752 int queues_left;
9753
9754 pf->num_lan_qps = 0;
9755 #ifdef I40E_FCOE
9756 pf->num_fcoe_qps = 0;
9757 #endif
9758
9759 /* Find the max queues to be put into basic use. We'll always be
9760 * using TC0, whether or not DCB is running, and TC0 will get the
9761 * big RSS set.
9762 */
9763 queues_left = pf->hw.func_caps.num_tx_qp;
9764
9765 if ((queues_left == 1) ||
9766 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9767 /* one qp for PF, no queues for anything else */
9768 queues_left = 0;
9769 pf->rss_size = pf->num_lan_qps = 1;
9770
9771 /* make sure all the fancies are disabled */
9772 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9773 #ifdef I40E_FCOE
9774 I40E_FLAG_FCOE_ENABLED |
9775 #endif
9776 I40E_FLAG_FD_SB_ENABLED |
9777 I40E_FLAG_FD_ATR_ENABLED |
9778 I40E_FLAG_DCB_CAPABLE |
9779 I40E_FLAG_SRIOV_ENABLED |
9780 I40E_FLAG_VMDQ_ENABLED);
9781 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9782 I40E_FLAG_FD_SB_ENABLED |
9783 I40E_FLAG_FD_ATR_ENABLED |
9784 I40E_FLAG_DCB_CAPABLE))) {
9785 /* one qp for PF */
9786 pf->rss_size = pf->num_lan_qps = 1;
9787 queues_left -= pf->num_lan_qps;
9788
9789 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9790 #ifdef I40E_FCOE
9791 I40E_FLAG_FCOE_ENABLED |
9792 #endif
9793 I40E_FLAG_FD_SB_ENABLED |
9794 I40E_FLAG_FD_ATR_ENABLED |
9795 I40E_FLAG_DCB_ENABLED |
9796 I40E_FLAG_VMDQ_ENABLED);
9797 } else {
9798 /* Not enough queues for all TCs */
9799 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9800 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9801 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9802 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9803 }
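/* size the LAN queue pairs to the larger of the RSS size limit and the
 * online CPU count, but never more than the HW Tx queues available
 */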
9804 pf->num_lan_qps = max_t(int, pf->rss_size_max,
9805 num_online_cpus());
9806 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9807 pf->hw.func_caps.num_tx_qp);
9808
9809 queues_left -= pf->num_lan_qps;
9810 }
9811
9812 #ifdef I40E_FCOE
9813 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9814 if (I40E_DEFAULT_FCOE <= queues_left) {
9815 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9816 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9817 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9818 } else {
9819 pf->num_fcoe_qps = 0;
9820 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9821 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9822 }
9823
9824 queues_left -= pf->num_fcoe_qps;
9825 }
9826
9827 #endif
9828 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9829 if (queues_left > 1) {
9830 queues_left -= 1; /* save 1 queue for FD */
9831 } else {
9832 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9833 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9834 }
9835 }
9836
9837 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9838 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9839 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9840 (queues_left / pf->num_vf_qps));
9841 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9842 }
9843
9844 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9845 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9846 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9847 (queues_left / pf->num_vmdq_qps));
9848 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9849 }
9850
9851 pf->queues_left = queues_left;
9852 #ifdef I40E_FCOE
9853 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9854 #endif
9855 }
9856
9857 /**
9858 * i40e_setup_pf_filter_control - Setup PF static filter control
9859 * @pf: PF to be setup
9860 *
9861 * i40e_setup_pf_filter_control sets up a PF's initial filter control
9862  * settings. If PE/FCoE are enabled then it will also set the per-PF
9863  * filter sizes required for them. It also enables Flow Director,
9864  * ethertype and macvlan type filter settings for the PF.
9865 *
9866 * Returns 0 on success, negative on failure
9867 **/
9868 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9869 {
9870 struct i40e_filter_control_settings *settings = &pf->filter_settings;
9871
9872 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9873
9874 /* Flow Director is enabled */
9875 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9876 settings->enable_fdir = true;
9877
9878 /* Ethtype and MACVLAN filters enabled for PF */
9879 settings->enable_ethtype = true;
9880 settings->enable_macvlan = true;
9881
9882 if (i40e_set_filter_control(&pf->hw, settings))
9883 return -ENOENT;
9884
9885 return 0;
9886 }
9887
9888 #define INFO_STRING_LEN 255
9889 static void i40e_print_features(struct i40e_pf *pf)
9890 {
9891 struct i40e_hw *hw = &pf->hw;
9892 char *buf, *string;
9893
9894 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9895 if (!string) {
9896 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9897 return;
9898 }
9899
9900 buf = string;
9901
9902 	buf += sprintf(buf, "Features: PF-id[%d] ", hw->pf_id);
9903 #ifdef CONFIG_PCI_IOV
9904 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9905 #endif
9906 buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9907 pf->hw.func_caps.num_vsis,
9908 pf->vsi[pf->lan_vsi]->num_queue_pairs,
9909 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9910
9911 if (pf->flags & I40E_FLAG_RSS_ENABLED)
9912 buf += sprintf(buf, "RSS ");
9913 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9914 buf += sprintf(buf, "FD_ATR ");
9915 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9916 buf += sprintf(buf, "FD_SB ");
9917 buf += sprintf(buf, "NTUPLE ");
9918 }
9919 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9920 buf += sprintf(buf, "DCB ");
9921 if (pf->flags & I40E_FLAG_PTP)
9922 buf += sprintf(buf, "PTP ");
9923 #ifdef I40E_FCOE
9924 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9925 buf += sprintf(buf, "FCOE ");
9926 #endif
9927
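	/* catch any accidental overflow of the fixed-size features string */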
9928 BUG_ON(buf > (string + INFO_STRING_LEN));
9929 dev_info(&pf->pdev->dev, "%s\n", string);
9930 kfree(string);
9931 }
9932
9933 /**
9934 * i40e_probe - Device initialization routine
9935 * @pdev: PCI device information struct
9936 * @ent: entry in i40e_pci_tbl
9937 *
9938 * i40e_probe initializes a PF identified by a pci_dev structure.
9939 * The OS initialization, configuring of the PF private structure,
9940 * and a hardware reset occur.
9941 *
9942 * Returns 0 on success, negative on failure
9943 **/
9944 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9945 {
9946 struct i40e_aq_get_phy_abilities_resp abilities;
9947 struct i40e_pf *pf;
9948 struct i40e_hw *hw;
9949 static u16 pfs_found;
9950 u16 link_status;
9951 int err = 0;
9952 u32 len;
9953 u32 i;
9954
9955 err = pci_enable_device_mem(pdev);
9956 if (err)
9957 return err;
9958
9959 /* set up for high or low dma */
9960 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9961 if (err) {
9962 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9963 if (err) {
9964 dev_err(&pdev->dev,
9965 "DMA configuration failed: 0x%x\n", err);
9966 goto err_dma;
9967 }
9968 }
9969
9970 /* set up pci connections */
9971 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9972 IORESOURCE_MEM), i40e_driver_name);
9973 if (err) {
9974 dev_info(&pdev->dev,
9975 "pci_request_selected_regions failed %d\n", err);
9976 goto err_pci_reg;
9977 }
9978
9979 pci_enable_pcie_error_reporting(pdev);
9980 pci_set_master(pdev);
9981
9982 /* Now that we have a PCI connection, we need to do the
9983 * low level device setup. This is primarily setting up
9984 * the Admin Queue structures and then querying for the
9985 * device's current profile information.
9986 */
9987 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9988 if (!pf) {
9989 err = -ENOMEM;
9990 goto err_pf_alloc;
9991 }
9992 pf->next_vsi = 0;
9993 pf->pdev = pdev;
9994 set_bit(__I40E_DOWN, &pf->state);
9995
9996 hw = &pf->hw;
9997 hw->back = pf;
9998
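	/* limit the BAR 0 register mapping to at most I40E_MAX_CSR_SPACE */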
9999 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10000 I40E_MAX_CSR_SPACE);
10001
10002 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10003 if (!hw->hw_addr) {
10004 err = -EIO;
10005 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10006 (unsigned int)pci_resource_start(pdev, 0),
10007 pf->ioremap_len, err);
10008 goto err_ioremap;
10009 }
10010 hw->vendor_id = pdev->vendor;
10011 hw->device_id = pdev->device;
10012 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10013 hw->subsystem_vendor_id = pdev->subsystem_vendor;
10014 hw->subsystem_device_id = pdev->subsystem_device;
10015 hw->bus.device = PCI_SLOT(pdev->devfn);
10016 hw->bus.func = PCI_FUNC(pdev->devfn);
10017 pf->instance = pfs_found;
10018
10019 if (debug != -1) {
10021 		pf->msg_enable = debug;
10022 }
10023
10024 /* do a special CORER for clearing PXE mode once at init */
10025 if (hw->revision_id == 0 &&
10026 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10027 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10028 i40e_flush(hw);
10029 msleep(200);
10030 pf->corer_count++;
10031
10032 i40e_clear_pxe_mode(hw);
10033 }
10034
10035 /* Reset here to make sure all is clean and to define PF 'n' */
10036 i40e_clear_hw(hw);
10037 err = i40e_pf_reset(hw);
10038 if (err) {
10039 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10040 goto err_pf_reset;
10041 }
10042 pf->pfr_count++;
10043
10044 hw->aq.num_arq_entries = I40E_AQ_LEN;
10045 hw->aq.num_asq_entries = I40E_AQ_LEN;
10046 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10047 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10048 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10049
10050 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10051 "%s-%s:misc",
10052 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10053
10054 err = i40e_init_shared_code(hw);
10055 if (err) {
10056 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10057 err);
10058 goto err_pf_reset;
10059 }
10060
10061 /* set up a default setting for link flow control */
10062 pf->hw.fc.requested_mode = I40E_FC_NONE;
10063
10064 err = i40e_init_adminq(hw);
10065 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
10066 if (err) {
10067 dev_info(&pdev->dev,
10068 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10069 goto err_pf_reset;
10070 }
10071
10072 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10073 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10074 dev_info(&pdev->dev,
10075 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10076 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10077 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10078 dev_info(&pdev->dev,
10079 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10080
10081 i40e_verify_eeprom(pf);
10082
10083 /* Rev 0 hardware was never productized */
10084 if (hw->revision_id < 1)
10085 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10086
10087 i40e_clear_pxe_mode(hw);
10088 err = i40e_get_capabilities(pf);
10089 if (err)
10090 goto err_adminq_setup;
10091
10092 err = i40e_sw_init(pf);
10093 if (err) {
10094 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10095 goto err_sw_init;
10096 }
10097
10098 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10099 hw->func_caps.num_rx_qp,
10100 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10101 if (err) {
10102 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10103 goto err_init_lan_hmc;
10104 }
10105
10106 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10107 if (err) {
10108 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10109 err = -ENOENT;
10110 goto err_configure_lan_hmc;
10111 }
10112
10113 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10114 * Ignore error return codes because if it was already disabled via
10115 * hardware settings this will fail
10116 */
10117 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10118 (pf->hw.aq.fw_maj_ver < 4)) {
10119 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10120 i40e_aq_stop_lldp(hw, true, NULL);
10121 }
10122
10123 i40e_get_mac_addr(hw, hw->mac.addr);
10124 if (!is_valid_ether_addr(hw->mac.addr)) {
10125 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10126 err = -EIO;
10127 goto err_mac_addr;
10128 }
10129 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10130 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10131 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10132 if (is_valid_ether_addr(hw->mac.port_addr))
10133 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10134 #ifdef I40E_FCOE
10135 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10136 if (err)
10137 dev_info(&pdev->dev,
10138 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10139 if (!is_valid_ether_addr(hw->mac.san_addr)) {
10140 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10141 hw->mac.san_addr);
10142 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10143 }
10144 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10145 #endif /* I40E_FCOE */
10146
10147 pci_set_drvdata(pdev, pf);
10148 pci_save_state(pdev);
10149 #ifdef CONFIG_I40E_DCB
10150 err = i40e_init_pf_dcb(pf);
10151 if (err) {
10152 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10153 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10154 /* Continue without DCB enabled */
10155 }
10156 #endif /* CONFIG_I40E_DCB */
10157
10158 /* set up periodic task facility */
10159 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10160 pf->service_timer_period = HZ;
10161
10162 INIT_WORK(&pf->service_task, i40e_service_task);
10163 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10164 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10165
10166 /* WoL defaults to disabled */
10167 pf->wol_en = false;
10168 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10169
10170 /* set up the main switch operations */
10171 i40e_determine_queue_usage(pf);
10172 err = i40e_init_interrupt_scheme(pf);
10173 if (err)
10174 goto err_switch_setup;
10175
10176 /* The number of VSIs reported by the FW is the minimum guaranteed
10177 * to us; HW supports far more and we share the remaining pool with
10178 * the other PFs. We allocate space for more than the guarantee with
10179 * the understanding that we might not get them all later.
10180 */
10181 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10182 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10183 else
10184 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10185
10186 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10187 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10188 pf->vsi = kzalloc(len, GFP_KERNEL);
10189 if (!pf->vsi) {
10190 err = -ENOMEM;
10191 goto err_switch_setup;
10192 }
10193
10194 #ifdef CONFIG_PCI_IOV
10195 /* prep for VF support */
10196 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10197 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10198 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10199 if (pci_num_vf(pdev))
10200 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10201 }
10202 #endif
10203 err = i40e_setup_pf_switch(pf, false);
10204 if (err) {
10205 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10206 goto err_vsis;
10207 }
10208 /* if FDIR VSI was set up, start it now */
10209 for (i = 0; i < pf->num_alloc_vsi; i++) {
10210 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10211 i40e_vsi_open(pf->vsi[i]);
10212 break;
10213 }
10214 }
10215
10216 /* driver is only interested in link up/down and module qualification
10217 * reports from firmware
10218 */
10219 err = i40e_aq_set_phy_int_mask(&pf->hw,
10220 I40E_AQ_EVENT_LINK_UPDOWN |
10221 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10222 if (err)
10223 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10224 i40e_stat_str(&pf->hw, err),
10225 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10226
10227 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10228 (pf->hw.aq.fw_maj_ver < 4)) {
10229 msleep(75);
10230 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10231 if (err)
10232 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10233 i40e_stat_str(&pf->hw, err),
10234 i40e_aq_str(&pf->hw,
10235 pf->hw.aq.asq_last_status));
10236 }
10237 /* The main driver is (mostly) up and happy. We need to set this state
10238 * before setting up the misc vector or we get a race and the vector
10239 * ends up disabled forever.
10240 */
10241 clear_bit(__I40E_DOWN, &pf->state);
10242
10243 /* In case of MSIX we are going to setup the misc vector right here
10244 * to handle admin queue events etc. In case of legacy and MSI
10245 * the misc functionality and queue processing is combined in
10246 * the same vector and that gets setup at open.
10247 */
10248 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10249 err = i40e_setup_misc_vector(pf);
10250 if (err) {
10251 dev_info(&pdev->dev,
10252 "setup of misc vector failed: %d\n", err);
10253 goto err_vsis;
10254 }
10255 }
10256
10257 #ifdef CONFIG_PCI_IOV
10258 /* prep for VF support */
10259 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10260 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10261 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10262 u32 val;
10263
10264 /* disable link interrupts for VFs */
10265 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10266 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10267 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10268 i40e_flush(hw);
10269
10270 if (pci_num_vf(pdev)) {
10271 dev_info(&pdev->dev,
10272 "Active VFs found, allocating resources.\n");
10273 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10274 if (err)
10275 dev_info(&pdev->dev,
10276 "Error %d allocating resources for existing VFs\n",
10277 err);
10278 }
10279 }
10280 #endif /* CONFIG_PCI_IOV */
10281
10282 pfs_found++;
10283
10284 i40e_dbg_pf_init(pf);
10285
10286 /* tell the firmware that we're starting */
10287 i40e_send_version(pf);
10288
10289 /* since everything's happy, start the service_task timer */
10290 mod_timer(&pf->service_timer,
10291 round_jiffies(jiffies + pf->service_timer_period));
10292
10293 #ifdef I40E_FCOE
10294 /* create FCoE interface */
10295 i40e_fcoe_vsi_setup(pf);
10296
10297 #endif
10298 /* Get the negotiated link width and speed from PCI config space */
10299 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
10300
10301 i40e_set_pci_config_data(hw, link_status);
10302
10303 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
10304 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
10305 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
10306 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
10307 "Unknown"),
10308 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
10309 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
10310 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
10311 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
10312 "Unknown"));
10313
10314 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
10315 hw->bus.speed < i40e_bus_speed_8000) {
10316 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
10317 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
10318 }
10319
10320 /* get the requested speeds from the fw */
10321 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10322 if (err)
10323 dev_info(&pf->pdev->dev,
10324 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
10325 i40e_stat_str(&pf->hw, err),
10326 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10327 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10328
10329 /* print a string summarizing features */
10330 i40e_print_features(pf);
10331
10332 return 0;
10333
10334 /* Unwind what we've done if something failed in the setup */
10335 err_vsis:
10336 set_bit(__I40E_DOWN, &pf->state);
10337 i40e_clear_interrupt_scheme(pf);
10338 kfree(pf->vsi);
10339 err_switch_setup:
10340 i40e_reset_interrupt_capability(pf);
10341 del_timer_sync(&pf->service_timer);
10342 err_mac_addr:
10343 err_configure_lan_hmc:
10344 (void)i40e_shutdown_lan_hmc(hw);
10345 err_init_lan_hmc:
10346 kfree(pf->qp_pile);
10347 err_sw_init:
10348 err_adminq_setup:
10349 (void)i40e_shutdown_adminq(hw);
10350 err_pf_reset:
10351 iounmap(hw->hw_addr);
10352 err_ioremap:
10353 kfree(pf);
10354 err_pf_alloc:
10355 pci_disable_pcie_error_reporting(pdev);
10356 pci_release_selected_regions(pdev,
10357 pci_select_bars(pdev, IORESOURCE_MEM));
10358 err_pci_reg:
10359 err_dma:
10360 pci_disable_device(pdev);
10361 return err;
10362 }
10363
10364 /**
10365 * i40e_remove - Device removal routine
10366 * @pdev: PCI device information struct
10367 *
10368 * i40e_remove is called by the PCI subsystem to alert the driver
10369  * that it should release a PCI device. This could be caused by a
10370 * Hot-Plug event, or because the driver is going to be removed from
10371 * memory.
10372 **/
10373 static void i40e_remove(struct pci_dev *pdev)
10374 {
10375 struct i40e_pf *pf = pci_get_drvdata(pdev);
10376 i40e_status ret_code;
10377 int i;
10378
10379 i40e_dbg_pf_exit(pf);
10380
10381 i40e_ptp_stop(pf);
10382
10383 /* no more scheduling of any task */
10384 set_bit(__I40E_DOWN, &pf->state);
10385 del_timer_sync(&pf->service_timer);
10386 cancel_work_sync(&pf->service_task);
10387 i40e_fdir_teardown(pf);
10388
10389 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10390 i40e_free_vfs(pf);
10391 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10392 }
10393
10394 i40e_fdir_teardown(pf);
10395
10396 /* If there is a switch structure or any orphans, remove them.
10397 	 * This will leave only the PF's VSI.
10398 */
10399 for (i = 0; i < I40E_MAX_VEB; i++) {
10400 if (!pf->veb[i])
10401 continue;
10402
10403 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10404 pf->veb[i]->uplink_seid == 0)
10405 i40e_switch_branch_release(pf->veb[i]);
10406 }
10407
10408 /* Now we can shutdown the PF's VSI, just before we kill
10409 * adminq and hmc.
10410 */
10411 if (pf->vsi[pf->lan_vsi])
10412 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10413
10414 /* shutdown and destroy the HMC */
10415 if (pf->hw.hmc.hmc_obj) {
10416 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10417 if (ret_code)
10418 dev_warn(&pdev->dev,
10419 "Failed to destroy the HMC resources: %d\n",
10420 ret_code);
10421 }
10422
10423 /* shutdown the adminq */
10424 ret_code = i40e_shutdown_adminq(&pf->hw);
10425 if (ret_code)
10426 dev_warn(&pdev->dev,
10427 "Failed to destroy the Admin Queue resources: %d\n",
10428 ret_code);
10429
10430 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10431 i40e_clear_interrupt_scheme(pf);
10432 for (i = 0; i < pf->num_alloc_vsi; i++) {
10433 if (pf->vsi[i]) {
10434 i40e_vsi_clear_rings(pf->vsi[i]);
10435 i40e_vsi_clear(pf->vsi[i]);
10436 pf->vsi[i] = NULL;
10437 }
10438 }
10439
10440 for (i = 0; i < I40E_MAX_VEB; i++) {
10441 kfree(pf->veb[i]);
10442 pf->veb[i] = NULL;
10443 }
10444
10445 kfree(pf->qp_pile);
10446 kfree(pf->vsi);
10447
10448 iounmap(pf->hw.hw_addr);
10449 kfree(pf);
10450 pci_release_selected_regions(pdev,
10451 pci_select_bars(pdev, IORESOURCE_MEM));
10452
10453 pci_disable_pcie_error_reporting(pdev);
10454 pci_disable_device(pdev);
10455 }
10456
10457 /**
10458 * i40e_pci_error_detected - warning that something funky happened in PCI land
10459 * @pdev: PCI device information struct
10460 *
10461 * Called to warn that something happened and the error handling steps
10462  * are in progress. Allows the driver to quiesce things and be ready for
10463 * remediation.
10464 **/
10465 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10466 enum pci_channel_state error)
10467 {
10468 struct i40e_pf *pf = pci_get_drvdata(pdev);
10469
10470 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10471
10472 /* shutdown all operations */
10473 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10474 rtnl_lock();
10475 i40e_prep_for_reset(pf);
10476 rtnl_unlock();
10477 }
10478
10479 /* Request a slot reset */
10480 return PCI_ERS_RESULT_NEED_RESET;
10481 }
10482
10483 /**
10484 * i40e_pci_error_slot_reset - a PCI slot reset just happened
10485 * @pdev: PCI device information struct
10486 *
10487 * Called to find if the driver can work with the device now that
10488 * the pci slot has been reset. If a basic connection seems good
10489 * (registers are readable and have sane content) then return a
10490 * happy little PCI_ERS_RESULT_xxx.
10491 **/
10492 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10493 {
10494 struct i40e_pf *pf = pci_get_drvdata(pdev);
10495 pci_ers_result_t result;
10496 int err;
10497 u32 reg;
10498
10499 dev_info(&pdev->dev, "%s\n", __func__);
10500 if (pci_enable_device_mem(pdev)) {
10501 dev_info(&pdev->dev,
10502 "Cannot re-enable PCI device after reset.\n");
10503 result = PCI_ERS_RESULT_DISCONNECT;
10504 } else {
10505 pci_set_master(pdev);
10506 pci_restore_state(pdev);
10507 pci_save_state(pdev);
10508 pci_wake_from_d3(pdev, false);
10509
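		/* consider the device recovered only if no global reset is still pending */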
10510 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10511 if (reg == 0)
10512 result = PCI_ERS_RESULT_RECOVERED;
10513 else
10514 result = PCI_ERS_RESULT_DISCONNECT;
10515 }
10516
10517 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10518 if (err) {
10519 dev_info(&pdev->dev,
10520 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10521 err);
10522 /* non-fatal, continue */
10523 }
10524
10525 return result;
10526 }
10527
10528 /**
10529 * i40e_pci_error_resume - restart operations after PCI error recovery
10530 * @pdev: PCI device information struct
10531 *
10532 * Called to allow the driver to bring things back up after PCI error
10533 * and/or reset recovery has finished.
10534 **/
10535 static void i40e_pci_error_resume(struct pci_dev *pdev)
10536 {
10537 struct i40e_pf *pf = pci_get_drvdata(pdev);
10538
10539 dev_info(&pdev->dev, "%s\n", __func__);
10540 if (test_bit(__I40E_SUSPENDED, &pf->state))
10541 return;
10542
10543 rtnl_lock();
10544 i40e_handle_reset_warning(pf);
10545 rtnl_unlock();
10546 }
10547
10548 /**
10549 * i40e_shutdown - PCI callback for shutting down
10550 * @pdev: PCI device information struct
10551 **/
10552 static void i40e_shutdown(struct pci_dev *pdev)
10553 {
10554 struct i40e_pf *pf = pci_get_drvdata(pdev);
10555 struct i40e_hw *hw = &pf->hw;
10556
10557 set_bit(__I40E_SUSPENDED, &pf->state);
10558 set_bit(__I40E_DOWN, &pf->state);
10559 rtnl_lock();
10560 i40e_prep_for_reset(pf);
10561 rtnl_unlock();
10562
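	/* carry the WoL setting into the APM and magic-packet wake-up controls */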
10563 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10564 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10565
10566 del_timer_sync(&pf->service_timer);
10567 cancel_work_sync(&pf->service_task);
10568 i40e_fdir_teardown(pf);
10569
10570 rtnl_lock();
10571 i40e_prep_for_reset(pf);
10572 rtnl_unlock();
10573
10574 wr32(hw, I40E_PFPM_APM,
10575 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10576 wr32(hw, I40E_PFPM_WUFC,
10577 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10578
10579 i40e_clear_interrupt_scheme(pf);
10580
10581 if (system_state == SYSTEM_POWER_OFF) {
10582 pci_wake_from_d3(pdev, pf->wol_en);
10583 pci_set_power_state(pdev, PCI_D3hot);
10584 }
10585 }
10586
10587 #ifdef CONFIG_PM
10588 /**
10589 * i40e_suspend - PCI callback for moving to D3
10590 * @pdev: PCI device information struct
10591 **/
10592 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10593 {
10594 struct i40e_pf *pf = pci_get_drvdata(pdev);
10595 struct i40e_hw *hw = &pf->hw;
10596
10597 set_bit(__I40E_SUSPENDED, &pf->state);
10598 set_bit(__I40E_DOWN, &pf->state);
10599
10600 rtnl_lock();
10601 i40e_prep_for_reset(pf);
10602 rtnl_unlock();
10603
10604 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10605 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10606
10607 pci_wake_from_d3(pdev, pf->wol_en);
10608 pci_set_power_state(pdev, PCI_D3hot);
10609
10610 return 0;
10611 }
10612
10613 /**
10614 * i40e_resume - PCI callback for waking up from D3
10615 * @pdev: PCI device information struct
10616 **/
10617 static int i40e_resume(struct pci_dev *pdev)
10618 {
10619 struct i40e_pf *pf = pci_get_drvdata(pdev);
10620 u32 err;
10621
10622 pci_set_power_state(pdev, PCI_D0);
10623 pci_restore_state(pdev);
10624 	/* pci_restore_state() clears dev->state_saved, so
10625 * call pci_save_state() again to restore it.
10626 */
10627 pci_save_state(pdev);
10628
10629 err = pci_enable_device_mem(pdev);
10630 if (err) {
10631 dev_err(&pdev->dev,
10632 "%s: Cannot enable PCI device from suspend\n",
10633 __func__);
10634 return err;
10635 }
10636 pci_set_master(pdev);
10637
10638 /* no wakeup events while running */
10639 pci_wake_from_d3(pdev, false);
10640
10641 /* handling the reset will rebuild the device state */
10642 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10643 clear_bit(__I40E_DOWN, &pf->state);
10644 rtnl_lock();
10645 i40e_reset_and_rebuild(pf, false);
10646 rtnl_unlock();
10647 }
10648
10649 return 0;
10650 }
10651
10652 #endif
10653 static const struct pci_error_handlers i40e_err_handler = {
10654 .error_detected = i40e_pci_error_detected,
10655 .slot_reset = i40e_pci_error_slot_reset,
10656 .resume = i40e_pci_error_resume,
10657 };
10658
10659 static struct pci_driver i40e_driver = {
10660 .name = i40e_driver_name,
10661 .id_table = i40e_pci_tbl,
10662 .probe = i40e_probe,
10663 .remove = i40e_remove,
10664 #ifdef CONFIG_PM
10665 .suspend = i40e_suspend,
10666 .resume = i40e_resume,
10667 #endif
10668 .shutdown = i40e_shutdown,
10669 .err_handler = &i40e_err_handler,
10670 .sriov_configure = i40e_pci_sriov_configure,
10671 };
10672
10673 /**
10674 * i40e_init_module - Driver registration routine
10675 *
10676 * i40e_init_module is the first routine called when the driver is
10677 * loaded. All it does is register with the PCI subsystem.
10678 **/
10679 static int __init i40e_init_module(void)
10680 {
10681 pr_info("%s: %s - version %s\n", i40e_driver_name,
10682 i40e_driver_string, i40e_driver_version_str);
10683 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10684
10685 i40e_dbg_init();
10686 return pci_register_driver(&i40e_driver);
10687 }
10688 module_init(i40e_init_module);
10689
10690 /**
10691 * i40e_exit_module - Driver exit cleanup routine
10692 *
10693 * i40e_exit_module is called just before the driver is removed
10694 * from memory.
10695 **/
10696 static void __exit i40e_exit_module(void)
10697 {
10698 pci_unregister_driver(&i40e_driver);
10699 i40e_dbg_exit();
10700 }
10701 module_exit(i40e_exit_module);