i40e: mask phy events
drivers/net/ethernet/intel/i40e/i40e_main.c
1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36 "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 0
42 #define DRV_VERSION_BUILD 11
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49 /* a few forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62 *
63 * Last entry must be all 0s
64 *
65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66 * Class, Class Mask, private data (not used) }
67 */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77 /* required last entry */
78 {0, }
79 };
80 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
81
82 #define I40E_MAX_VF_COUNT 128
83 static int debug = -1;
84 module_param(debug, int, 0);
85 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
86
87 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
88 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 /**
93 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
94 * @hw: pointer to the HW structure
95 * @mem: ptr to mem struct to fill out
96 * @size: size of memory requested
97 * @alignment: what to align the allocation to
98 **/
99 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
100 u64 size, u32 alignment)
101 {
102 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
103
104 mem->size = ALIGN(size, alignment);
105 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
106 &mem->pa, GFP_KERNEL);
107 if (!mem->va)
108 return -ENOMEM;
109
110 return 0;
111 }
112
113 /**
114 * i40e_free_dma_mem_d - OS specific memory free for shared code
115 * @hw: pointer to the HW structure
116 * @mem: ptr to mem struct to free
117 **/
118 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
119 {
120 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
121
122 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
123 mem->va = NULL;
124 mem->pa = 0;
125 mem->size = 0;
126
127 return 0;
128 }
129
130 /**
131 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
132 * @hw: pointer to the HW structure
133 * @mem: ptr to mem struct to fill out
134 * @size: size of memory requested
135 **/
136 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
137 u32 size)
138 {
139 mem->size = size;
140 mem->va = kzalloc(size, GFP_KERNEL);
141
142 if (!mem->va)
143 return -ENOMEM;
144
145 return 0;
146 }
147
148 /**
149 * i40e_free_virt_mem_d - OS specific memory free for shared code
150 * @hw: pointer to the HW structure
151 * @mem: ptr to mem struct to free
152 **/
153 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
154 {
155 /* it's ok to kfree a NULL pointer */
156 kfree(mem->va);
157 mem->va = NULL;
158 mem->size = 0;
159
160 return 0;
161 }
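
/* Usage sketch for the four OS-specific memory hooks above. This is an
 * illustrative example rather than driver code; the shared/common code is
 * assumed to reach these through the wrappers in i40e_osdep.h. A caller
 * needing a DMA-able region might do:
 *
 *	struct i40e_dma_mem mem;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096))
 *		return -ENOMEM;
 *	... use mem.va (CPU address) and mem.pa (bus address) ...
 *	i40e_free_dma_mem_d(hw, &mem);
 *
 * i40e_allocate_virt_mem_d()/i40e_free_virt_mem_d() follow the same
 * pattern for plain kzalloc'd memory that the hardware never sees.
 */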
162
163 /**
164 * i40e_get_lump - find a lump of free generic resource
165 * @pf: board private structure
166 * @pile: the pile of resource to search
167 * @needed: the number of items needed
168 * @id: an owner id to stick on the items assigned
169 *
170 * Returns the base item index of the lump, or negative for error
171 *
172 * The search_hint trick and lack of advanced fit-finding only work
173 * because we're highly likely to have all the same size lump requests.
174 * Linear search time and any fragmentation should be minimal.
175 **/
176 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
177 u16 needed, u16 id)
178 {
179 int ret = -ENOMEM;
180 int i, j;
181
182 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
183 dev_info(&pf->pdev->dev,
184 "param err: pile=%p needed=%d id=0x%04x\n",
185 pile, needed, id);
186 return -EINVAL;
187 }
188
189 /* start the linear search with an imperfect hint */
190 i = pile->search_hint;
191 while (i < pile->num_entries) {
192 /* skip already allocated entries */
193 if (pile->list[i] & I40E_PILE_VALID_BIT) {
194 i++;
195 continue;
196 }
197
198 /* do we have enough in this lump? */
199 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
200 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
201 break;
202 }
203
204 if (j == needed) {
205 /* there was enough, so assign it to the requestor */
206 for (j = 0; j < needed; j++)
207 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
208 ret = i;
209 pile->search_hint = i + j;
210 break;
211 } else {
212 /* not enough, so skip over it and continue looking */
213 i += j;
214 }
215 }
216
217 return ret;
218 }
219
220 /**
221 * i40e_put_lump - return a lump of generic resource
222 * @pile: the pile of resource to search
223 * @index: the base item index
224 * @id: the owner id of the items assigned
225 *
226 * Returns the count of items in the lump
227 **/
228 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
229 {
230 int valid_id = (id | I40E_PILE_VALID_BIT);
231 int count = 0;
232 int i;
233
234 if (!pile || index >= pile->num_entries)
235 return -EINVAL;
236
237 for (i = index;
238 i < pile->num_entries && pile->list[i] == valid_id;
239 i++) {
240 pile->list[i] = 0;
241 count++;
242 }
243
244 if (count && index < pile->search_hint)
245 pile->search_hint = index;
246
247 return count;
248 }
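
/* Usage sketch for the lump tracker above (illustrative only; the actual
 * call sites live elsewhere in the driver). A VSI that needs a contiguous
 * run of queue-pair entries tagged with its index might do:
 *
 *	int base = i40e_get_lump(pf, pf->qp_pile, needed, vsi->idx);
 *
 *	if (base < 0)
 *		return base;	(no contiguous run of 'needed' free entries)
 *	vsi->base_queue = base;
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 *
 * pf->qp_pile and vsi->base_queue are assumed names here; the put side
 * returns how many entries were handed back and rewinds search_hint when
 * the freed range starts below it.
 */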
249
250 /**
251 * i40e_service_event_schedule - Schedule the service task to wake up
252 * @pf: board private structure
253 *
254 * If not already scheduled, this puts the task into the work queue
255 **/
256 static void i40e_service_event_schedule(struct i40e_pf *pf)
257 {
258 if (!test_bit(__I40E_DOWN, &pf->state) &&
259 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
260 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
261 schedule_work(&pf->service_task);
262 }
263
264 /**
265 * i40e_tx_timeout - Respond to a Tx Hang
266 * @netdev: network interface device structure
267 *
268 * If any port has noticed a Tx timeout, it is likely that the whole
269 * device is munged, not just the one netdev port, so go for the full
270 * reset.
271 **/
272 #ifdef I40E_FCOE
273 void i40e_tx_timeout(struct net_device *netdev)
274 #else
275 static void i40e_tx_timeout(struct net_device *netdev)
276 #endif
277 {
278 struct i40e_netdev_priv *np = netdev_priv(netdev);
279 struct i40e_vsi *vsi = np->vsi;
280 struct i40e_pf *pf = vsi->back;
281
282 pf->tx_timeout_count++;
283
284 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
285 pf->tx_timeout_recovery_level = 1;
286 pf->tx_timeout_last_recovery = jiffies;
287 netdev_info(netdev, "tx_timeout recovery level %d\n",
288 pf->tx_timeout_recovery_level);
289
290 switch (pf->tx_timeout_recovery_level) {
291 case 0:
292 /* disable and re-enable queues for the VSI */
293 if (in_interrupt()) {
294 set_bit(__I40E_REINIT_REQUESTED, &pf->state);
295 set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
296 } else {
297 i40e_vsi_reinit_locked(vsi);
298 }
299 break;
300 case 1:
301 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
302 break;
303 case 2:
304 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
305 break;
306 case 3:
307 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
308 break;
309 default:
310 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
311 set_bit(__I40E_DOWN_REQUESTED, &pf->state);
312 set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
313 break;
314 }
315 i40e_service_event_schedule(pf);
316 pf->tx_timeout_recovery_level++;
317 }
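
/* Note on the recovery ladder above: each successive timeout escalates
 * from reinitializing the VSI queues (level 0), to a PF reset (1), a core
 * reset (2), a global reset (3), and finally a requested port down. The
 * level is wound back to 1 only when more than 20 seconds have passed
 * since the last recovery attempt.
 */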
318
319 /**
320 * i40e_release_rx_desc - Store the new tail value and notify hardware
321 * @rx_ring: ring to bump
322 * @val: new tail index (next_to_use)
323 **/
324 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
325 {
326 rx_ring->next_to_use = val;
327
328 /* Force memory writes to complete before letting h/w
329 * know there are new descriptors to fetch. (Only
330 * applicable for weak-ordered memory model archs,
331 * such as IA-64).
332 */
333 wmb();
334 writel(val, rx_ring->tail);
335 }
336
337 /**
338 * i40e_get_vsi_stats_struct - Get System Network Statistics
339 * @vsi: the VSI we care about
340 *
341 * Returns the address of the device statistics structure.
342 * The statistics are actually updated from the service task.
343 **/
344 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
345 {
346 return &vsi->net_stats;
347 }
348
349 /**
350 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
351 * @netdev: network interface device structure
352 *
353 * Returns the address of the device statistics structure.
354 * The statistics are actually updated from the service task.
355 **/
356 #ifdef I40E_FCOE
357 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
358 struct net_device *netdev,
359 struct rtnl_link_stats64 *stats)
360 #else
361 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
362 struct net_device *netdev,
363 struct rtnl_link_stats64 *stats)
364 #endif
365 {
366 struct i40e_netdev_priv *np = netdev_priv(netdev);
367 struct i40e_ring *tx_ring, *rx_ring;
368 struct i40e_vsi *vsi = np->vsi;
369 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
370 int i;
371
372 if (test_bit(__I40E_DOWN, &vsi->state))
373 return stats;
374
375 if (!vsi->tx_rings)
376 return stats;
377
378 rcu_read_lock();
379 for (i = 0; i < vsi->num_queue_pairs; i++) {
380 u64 bytes, packets;
381 unsigned int start;
382
383 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
384 if (!tx_ring)
385 continue;
386
387 do {
388 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
389 packets = tx_ring->stats.packets;
390 bytes = tx_ring->stats.bytes;
391 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
392
393 stats->tx_packets += packets;
394 stats->tx_bytes += bytes;
395 rx_ring = &tx_ring[1];
396
397 do {
398 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
399 packets = rx_ring->stats.packets;
400 bytes = rx_ring->stats.bytes;
401 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
402
403 stats->rx_packets += packets;
404 stats->rx_bytes += bytes;
405 }
406 rcu_read_unlock();
407
408 /* following stats updated by i40e_watchdog_subtask() */
409 stats->multicast = vsi_stats->multicast;
410 stats->tx_errors = vsi_stats->tx_errors;
411 stats->tx_dropped = vsi_stats->tx_dropped;
412 stats->rx_errors = vsi_stats->rx_errors;
413 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
414 stats->rx_length_errors = vsi_stats->rx_length_errors;
415
416 return stats;
417 }
418
419 /**
420 * i40e_vsi_reset_stats - Resets all stats of the given vsi
421 * @vsi: the VSI to have its stats reset
422 **/
423 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
424 {
425 struct rtnl_link_stats64 *ns;
426 int i;
427
428 if (!vsi)
429 return;
430
431 ns = i40e_get_vsi_stats_struct(vsi);
432 memset(ns, 0, sizeof(*ns));
433 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
434 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
435 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
436 if (vsi->rx_rings && vsi->rx_rings[0]) {
437 for (i = 0; i < vsi->num_queue_pairs; i++) {
438 memset(&vsi->rx_rings[i]->stats, 0,
439 sizeof(vsi->rx_rings[i]->stats));
440 memset(&vsi->rx_rings[i]->rx_stats, 0,
441 sizeof(vsi->rx_rings[i]->rx_stats));
442 memset(&vsi->tx_rings[i]->stats, 0,
443 sizeof(vsi->tx_rings[i]->stats));
444 memset(&vsi->tx_rings[i]->tx_stats, 0,
445 sizeof(vsi->tx_rings[i]->tx_stats));
446 }
447 }
448 vsi->stat_offsets_loaded = false;
449 }
450
451 /**
452 * i40e_pf_reset_stats - Reset all of the stats for the given pf
453 * @pf: the PF to be reset
454 **/
455 void i40e_pf_reset_stats(struct i40e_pf *pf)
456 {
457 int i;
458
459 memset(&pf->stats, 0, sizeof(pf->stats));
460 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
461 pf->stat_offsets_loaded = false;
462
463 for (i = 0; i < I40E_MAX_VEB; i++) {
464 if (pf->veb[i]) {
465 memset(&pf->veb[i]->stats, 0,
466 sizeof(pf->veb[i]->stats));
467 memset(&pf->veb[i]->stats_offsets, 0,
468 sizeof(pf->veb[i]->stats_offsets));
469 pf->veb[i]->stat_offsets_loaded = false;
470 }
471 }
472 }
473
474 /**
475 * i40e_stat_update48 - read and update a 48 bit stat from the chip
476 * @hw: ptr to the hardware info
477 * @hireg: the high 32 bit reg to read
478 * @loreg: the low 32 bit reg to read
479 * @offset_loaded: has the initial offset been loaded yet
480 * @offset: ptr to current offset value
481 * @stat: ptr to the stat
482 *
483 * Since the device stats are not reset at PFReset, they likely will not
484 * be zeroed when the driver starts. We'll save the first values read
485 * and use them as offsets to be subtracted from the raw values in order
486 * to report stats that count from zero. In the process, we also manage
487 * the potential roll-over.
488 **/
489 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
490 bool offset_loaded, u64 *offset, u64 *stat)
491 {
492 u64 new_data;
493
494 if (hw->device_id == I40E_DEV_ID_QEMU) {
495 new_data = rd32(hw, loreg);
496 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
497 } else {
498 new_data = rd64(hw, loreg);
499 }
500 if (!offset_loaded)
501 *offset = new_data;
502 if (likely(new_data >= *offset))
503 *stat = new_data - *offset;
504 else
505 *stat = (new_data + ((u64)1 << 48)) - *offset;
506 *stat &= 0xFFFFFFFFFFFFULL;
507 }
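
/* Worked example of the offset handling above (illustrative values): if
 * the first read after driver load returns 0x1000 it is latched as
 * *offset, and a later read of 0x1400 reports *stat = 0x400, i.e. the
 * count since the driver started. If the 48-bit hardware counter wraps
 * and the new value drops below *offset, 1 << 48 is added back before the
 * subtraction, and the result is masked to 48 bits either way.
 */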
508
509 /**
510 * i40e_stat_update32 - read and update a 32 bit stat from the chip
511 * @hw: ptr to the hardware info
512 * @reg: the hw reg to read
513 * @offset_loaded: has the initial offset been loaded yet
514 * @offset: ptr to current offset value
515 * @stat: ptr to the stat
516 **/
517 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
518 bool offset_loaded, u64 *offset, u64 *stat)
519 {
520 u32 new_data;
521
522 new_data = rd32(hw, reg);
523 if (!offset_loaded)
524 *offset = new_data;
525 if (likely(new_data >= *offset))
526 *stat = (u32)(new_data - *offset);
527 else
528 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
529 }
530
531 /**
532 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
533 * @vsi: the VSI to be updated
534 **/
535 void i40e_update_eth_stats(struct i40e_vsi *vsi)
536 {
537 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
538 struct i40e_pf *pf = vsi->back;
539 struct i40e_hw *hw = &pf->hw;
540 struct i40e_eth_stats *oes;
541 struct i40e_eth_stats *es; /* device's eth stats */
542
543 es = &vsi->eth_stats;
544 oes = &vsi->eth_stats_offsets;
545
546 /* Gather up the stats that the hw collects */
547 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
548 vsi->stat_offsets_loaded,
549 &oes->tx_errors, &es->tx_errors);
550 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
551 vsi->stat_offsets_loaded,
552 &oes->rx_discards, &es->rx_discards);
553 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
554 vsi->stat_offsets_loaded,
555 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
556 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
557 vsi->stat_offsets_loaded,
558 &oes->tx_errors, &es->tx_errors);
559
560 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
561 I40E_GLV_GORCL(stat_idx),
562 vsi->stat_offsets_loaded,
563 &oes->rx_bytes, &es->rx_bytes);
564 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
565 I40E_GLV_UPRCL(stat_idx),
566 vsi->stat_offsets_loaded,
567 &oes->rx_unicast, &es->rx_unicast);
568 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
569 I40E_GLV_MPRCL(stat_idx),
570 vsi->stat_offsets_loaded,
571 &oes->rx_multicast, &es->rx_multicast);
572 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
573 I40E_GLV_BPRCL(stat_idx),
574 vsi->stat_offsets_loaded,
575 &oes->rx_broadcast, &es->rx_broadcast);
576
577 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
578 I40E_GLV_GOTCL(stat_idx),
579 vsi->stat_offsets_loaded,
580 &oes->tx_bytes, &es->tx_bytes);
581 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
582 I40E_GLV_UPTCL(stat_idx),
583 vsi->stat_offsets_loaded,
584 &oes->tx_unicast, &es->tx_unicast);
585 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
586 I40E_GLV_MPTCL(stat_idx),
587 vsi->stat_offsets_loaded,
588 &oes->tx_multicast, &es->tx_multicast);
589 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
590 I40E_GLV_BPTCL(stat_idx),
591 vsi->stat_offsets_loaded,
592 &oes->tx_broadcast, &es->tx_broadcast);
593 vsi->stat_offsets_loaded = true;
594 }
595
596 /**
597 * i40e_update_veb_stats - Update Switch component statistics
598 * @veb: the VEB being updated
599 **/
600 static void i40e_update_veb_stats(struct i40e_veb *veb)
601 {
602 struct i40e_pf *pf = veb->pf;
603 struct i40e_hw *hw = &pf->hw;
604 struct i40e_eth_stats *oes;
605 struct i40e_eth_stats *es; /* device's eth stats */
606 int idx = 0;
607
608 idx = veb->stats_idx;
609 es = &veb->stats;
610 oes = &veb->stats_offsets;
611
612 /* Gather up the stats that the hw collects */
613 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
614 veb->stat_offsets_loaded,
615 &oes->tx_discards, &es->tx_discards);
616 if (hw->revision_id > 0)
617 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
618 veb->stat_offsets_loaded,
619 &oes->rx_unknown_protocol,
620 &es->rx_unknown_protocol);
621 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
622 veb->stat_offsets_loaded,
623 &oes->rx_bytes, &es->rx_bytes);
624 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
625 veb->stat_offsets_loaded,
626 &oes->rx_unicast, &es->rx_unicast);
627 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
628 veb->stat_offsets_loaded,
629 &oes->rx_multicast, &es->rx_multicast);
630 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
631 veb->stat_offsets_loaded,
632 &oes->rx_broadcast, &es->rx_broadcast);
633
634 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
635 veb->stat_offsets_loaded,
636 &oes->tx_bytes, &es->tx_bytes);
637 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
638 veb->stat_offsets_loaded,
639 &oes->tx_unicast, &es->tx_unicast);
640 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
641 veb->stat_offsets_loaded,
642 &oes->tx_multicast, &es->tx_multicast);
643 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
644 veb->stat_offsets_loaded,
645 &oes->tx_broadcast, &es->tx_broadcast);
646 veb->stat_offsets_loaded = true;
647 }
648
649 #ifdef I40E_FCOE
650 /**
651 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
652 * @vsi: the VSI that is capable of doing FCoE
653 **/
654 static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
655 {
656 struct i40e_pf *pf = vsi->back;
657 struct i40e_hw *hw = &pf->hw;
658 struct i40e_fcoe_stats *ofs;
659 struct i40e_fcoe_stats *fs; /* device's FCoE stats */
660 int idx;
661
662 if (vsi->type != I40E_VSI_FCOE)
663 return;
664
665 idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
666 fs = &vsi->fcoe_stats;
667 ofs = &vsi->fcoe_stats_offsets;
668
669 i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
670 vsi->fcoe_stat_offsets_loaded,
671 &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
672 i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
673 vsi->fcoe_stat_offsets_loaded,
674 &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
675 i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
676 vsi->fcoe_stat_offsets_loaded,
677 &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
678 i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
679 vsi->fcoe_stat_offsets_loaded,
680 &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
681 i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
682 vsi->fcoe_stat_offsets_loaded,
683 &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
684 i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
685 vsi->fcoe_stat_offsets_loaded,
686 &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
687 i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
688 vsi->fcoe_stat_offsets_loaded,
689 &ofs->fcoe_last_error, &fs->fcoe_last_error);
690 i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
691 vsi->fcoe_stat_offsets_loaded,
692 &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
693
694 vsi->fcoe_stat_offsets_loaded = true;
695 }
696
697 #endif
698 /**
699 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
700 * @pf: the corresponding PF
701 *
702 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
703 **/
704 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
705 {
706 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
707 struct i40e_hw_port_stats *nsd = &pf->stats;
708 struct i40e_hw *hw = &pf->hw;
709 u64 xoff = 0;
710 u16 i, v;
711
712 if ((hw->fc.current_mode != I40E_FC_FULL) &&
713 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
714 return;
715
716 xoff = nsd->link_xoff_rx;
717 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
718 pf->stat_offsets_loaded,
719 &osd->link_xoff_rx, &nsd->link_xoff_rx);
720
721 /* No new LFC xoff rx */
722 if (!(nsd->link_xoff_rx - xoff))
723 return;
724
725 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
726 for (v = 0; v < pf->num_alloc_vsi; v++) {
727 struct i40e_vsi *vsi = pf->vsi[v];
728
729 if (!vsi || !vsi->tx_rings[0])
730 continue;
731
732 for (i = 0; i < vsi->num_queue_pairs; i++) {
733 struct i40e_ring *ring = vsi->tx_rings[i];
734 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
735 }
736 }
737 }
738
739 /**
740 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
741 * @pf: the corresponding PF
742 *
743 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
744 **/
745 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
746 {
747 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
748 struct i40e_hw_port_stats *nsd = &pf->stats;
749 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
750 struct i40e_dcbx_config *dcb_cfg;
751 struct i40e_hw *hw = &pf->hw;
752 u16 i, v;
753 u8 tc;
754
755 dcb_cfg = &hw->local_dcbx_config;
756
757 /* See if DCB enabled with PFC TC */
758 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
759 !(dcb_cfg->pfc.pfcenable)) {
760 i40e_update_link_xoff_rx(pf);
761 return;
762 }
763
764 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
765 u64 prio_xoff = nsd->priority_xoff_rx[i];
766 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
767 pf->stat_offsets_loaded,
768 &osd->priority_xoff_rx[i],
769 &nsd->priority_xoff_rx[i]);
770
771 /* No new PFC xoff rx */
772 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
773 continue;
774 /* Get the TC for given priority */
775 tc = dcb_cfg->etscfg.prioritytable[i];
776 xoff[tc] = true;
777 }
778
779 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
780 for (v = 0; v < pf->num_alloc_vsi; v++) {
781 struct i40e_vsi *vsi = pf->vsi[v];
782
783 if (!vsi || !vsi->tx_rings[0])
784 continue;
785
786 for (i = 0; i < vsi->num_queue_pairs; i++) {
787 struct i40e_ring *ring = vsi->tx_rings[i];
788
789 tc = ring->dcb_tc;
790 if (xoff[tc])
791 clear_bit(__I40E_HANG_CHECK_ARMED,
792 &ring->state);
793 }
794 }
795 }
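
/* Example of the PFC bookkeeping above (illustrative mapping): if the
 * DCBX priority table assigns user priorities 0-3 to TC0 and 4-7 to TC1,
 * a fresh XOFF seen on priority 5 sets xoff[1], so only Tx rings whose
 * ring->dcb_tc is 1 have __I40E_HANG_CHECK_ARMED cleared; rings on TC0
 * keep their hang-check state armed.
 */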
796
797 /**
798 * i40e_update_vsi_stats - Update the vsi statistics counters.
799 * @vsi: the VSI to be updated
800 *
801 * There are a few instances where we store the same stat in a
802 * couple of different structs. This is partly because we have
803 * the netdev stats that need to be filled out, which is slightly
804 * different from the "eth_stats" defined by the chip and used in
805 * VF communications. We sort it out here.
806 **/
807 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
808 {
809 struct i40e_pf *pf = vsi->back;
810 struct rtnl_link_stats64 *ons;
811 struct rtnl_link_stats64 *ns; /* netdev stats */
812 struct i40e_eth_stats *oes;
813 struct i40e_eth_stats *es; /* device's eth stats */
814 u32 tx_restart, tx_busy;
815 u32 rx_page, rx_buf;
816 u64 rx_p, rx_b;
817 u64 tx_p, tx_b;
818 u16 q;
819
820 if (test_bit(__I40E_DOWN, &vsi->state) ||
821 test_bit(__I40E_CONFIG_BUSY, &pf->state))
822 return;
823
824 ns = i40e_get_vsi_stats_struct(vsi);
825 ons = &vsi->net_stats_offsets;
826 es = &vsi->eth_stats;
827 oes = &vsi->eth_stats_offsets;
828
829 /* Gather up the netdev and vsi stats that the driver collects
830 * on the fly during packet processing
831 */
832 rx_b = rx_p = 0;
833 tx_b = tx_p = 0;
834 tx_restart = tx_busy = 0;
835 rx_page = 0;
836 rx_buf = 0;
837 rcu_read_lock();
838 for (q = 0; q < vsi->num_queue_pairs; q++) {
839 struct i40e_ring *p;
840 u64 bytes, packets;
841 unsigned int start;
842
843 /* locate Tx ring */
844 p = ACCESS_ONCE(vsi->tx_rings[q]);
845
846 do {
847 start = u64_stats_fetch_begin_irq(&p->syncp);
848 packets = p->stats.packets;
849 bytes = p->stats.bytes;
850 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
851 tx_b += bytes;
852 tx_p += packets;
853 tx_restart += p->tx_stats.restart_queue;
854 tx_busy += p->tx_stats.tx_busy;
855
856 /* Rx queue is part of the same block as Tx queue */
857 p = &p[1];
858 do {
859 start = u64_stats_fetch_begin_irq(&p->syncp);
860 packets = p->stats.packets;
861 bytes = p->stats.bytes;
862 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
863 rx_b += bytes;
864 rx_p += packets;
865 rx_buf += p->rx_stats.alloc_buff_failed;
866 rx_page += p->rx_stats.alloc_page_failed;
867 }
868 rcu_read_unlock();
869 vsi->tx_restart = tx_restart;
870 vsi->tx_busy = tx_busy;
871 vsi->rx_page_failed = rx_page;
872 vsi->rx_buf_failed = rx_buf;
873
874 ns->rx_packets = rx_p;
875 ns->rx_bytes = rx_b;
876 ns->tx_packets = tx_p;
877 ns->tx_bytes = tx_b;
878
879 /* update netdev stats from eth stats */
880 i40e_update_eth_stats(vsi);
881 ons->tx_errors = oes->tx_errors;
882 ns->tx_errors = es->tx_errors;
883 ons->multicast = oes->rx_multicast;
884 ns->multicast = es->rx_multicast;
885 ons->rx_dropped = oes->rx_discards;
886 ns->rx_dropped = es->rx_discards;
887 ons->tx_dropped = oes->tx_discards;
888 ns->tx_dropped = es->tx_discards;
889
890 /* pull in a couple PF stats if this is the main vsi */
891 if (vsi == pf->vsi[pf->lan_vsi]) {
892 ns->rx_crc_errors = pf->stats.crc_errors;
893 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
894 ns->rx_length_errors = pf->stats.rx_length_errors;
895 }
896 }
897
898 /**
899 * i40e_update_pf_stats - Update the pf statistics counters.
900 * @pf: the PF to be updated
901 **/
902 static void i40e_update_pf_stats(struct i40e_pf *pf)
903 {
904 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
905 struct i40e_hw_port_stats *nsd = &pf->stats;
906 struct i40e_hw *hw = &pf->hw;
907 u32 val;
908 int i;
909
910 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
911 I40E_GLPRT_GORCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
914 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
915 I40E_GLPRT_GOTCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
918 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
919 pf->stat_offsets_loaded,
920 &osd->eth.rx_discards,
921 &nsd->eth.rx_discards);
922 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
923 pf->stat_offsets_loaded,
924 &osd->eth.tx_discards,
925 &nsd->eth.tx_discards);
926
927 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
928 I40E_GLPRT_UPRCL(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->eth.rx_unicast,
931 &nsd->eth.rx_unicast);
932 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
933 I40E_GLPRT_MPRCL(hw->port),
934 pf->stat_offsets_loaded,
935 &osd->eth.rx_multicast,
936 &nsd->eth.rx_multicast);
937 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
938 I40E_GLPRT_BPRCL(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->eth.rx_broadcast,
941 &nsd->eth.rx_broadcast);
942 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
943 I40E_GLPRT_UPTCL(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->eth.tx_unicast,
946 &nsd->eth.tx_unicast);
947 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
948 I40E_GLPRT_MPTCL(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->eth.tx_multicast,
951 &nsd->eth.tx_multicast);
952 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
953 I40E_GLPRT_BPTCL(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->eth.tx_broadcast,
956 &nsd->eth.tx_broadcast);
957
958 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->tx_dropped_link_down,
961 &nsd->tx_dropped_link_down);
962
963 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
964 pf->stat_offsets_loaded,
965 &osd->crc_errors, &nsd->crc_errors);
966
967 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
968 pf->stat_offsets_loaded,
969 &osd->illegal_bytes, &nsd->illegal_bytes);
970
971 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
972 pf->stat_offsets_loaded,
973 &osd->mac_local_faults,
974 &nsd->mac_local_faults);
975 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->mac_remote_faults,
978 &nsd->mac_remote_faults);
979
980 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
981 pf->stat_offsets_loaded,
982 &osd->rx_length_errors,
983 &nsd->rx_length_errors);
984
985 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
986 pf->stat_offsets_loaded,
987 &osd->link_xon_rx, &nsd->link_xon_rx);
988 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
989 pf->stat_offsets_loaded,
990 &osd->link_xon_tx, &nsd->link_xon_tx);
991 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
992 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
993 pf->stat_offsets_loaded,
994 &osd->link_xoff_tx, &nsd->link_xoff_tx);
995
996 for (i = 0; i < 8; i++) {
997 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
998 pf->stat_offsets_loaded,
999 &osd->priority_xon_rx[i],
1000 &nsd->priority_xon_rx[i]);
1001 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1002 pf->stat_offsets_loaded,
1003 &osd->priority_xon_tx[i],
1004 &nsd->priority_xon_tx[i]);
1005 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1006 pf->stat_offsets_loaded,
1007 &osd->priority_xoff_tx[i],
1008 &nsd->priority_xoff_tx[i]);
1009 i40e_stat_update32(hw,
1010 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1011 pf->stat_offsets_loaded,
1012 &osd->priority_xon_2_xoff[i],
1013 &nsd->priority_xon_2_xoff[i]);
1014 }
1015
1016 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1017 I40E_GLPRT_PRC64L(hw->port),
1018 pf->stat_offsets_loaded,
1019 &osd->rx_size_64, &nsd->rx_size_64);
1020 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1021 I40E_GLPRT_PRC127L(hw->port),
1022 pf->stat_offsets_loaded,
1023 &osd->rx_size_127, &nsd->rx_size_127);
1024 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1025 I40E_GLPRT_PRC255L(hw->port),
1026 pf->stat_offsets_loaded,
1027 &osd->rx_size_255, &nsd->rx_size_255);
1028 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1029 I40E_GLPRT_PRC511L(hw->port),
1030 pf->stat_offsets_loaded,
1031 &osd->rx_size_511, &nsd->rx_size_511);
1032 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1033 I40E_GLPRT_PRC1023L(hw->port),
1034 pf->stat_offsets_loaded,
1035 &osd->rx_size_1023, &nsd->rx_size_1023);
1036 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1037 I40E_GLPRT_PRC1522L(hw->port),
1038 pf->stat_offsets_loaded,
1039 &osd->rx_size_1522, &nsd->rx_size_1522);
1040 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1041 I40E_GLPRT_PRC9522L(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_size_big, &nsd->rx_size_big);
1044
1045 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1046 I40E_GLPRT_PTC64L(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->tx_size_64, &nsd->tx_size_64);
1049 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1050 I40E_GLPRT_PTC127L(hw->port),
1051 pf->stat_offsets_loaded,
1052 &osd->tx_size_127, &nsd->tx_size_127);
1053 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1054 I40E_GLPRT_PTC255L(hw->port),
1055 pf->stat_offsets_loaded,
1056 &osd->tx_size_255, &nsd->tx_size_255);
1057 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1058 I40E_GLPRT_PTC511L(hw->port),
1059 pf->stat_offsets_loaded,
1060 &osd->tx_size_511, &nsd->tx_size_511);
1061 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1062 I40E_GLPRT_PTC1023L(hw->port),
1063 pf->stat_offsets_loaded,
1064 &osd->tx_size_1023, &nsd->tx_size_1023);
1065 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1066 I40E_GLPRT_PTC1522L(hw->port),
1067 pf->stat_offsets_loaded,
1068 &osd->tx_size_1522, &nsd->tx_size_1522);
1069 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1070 I40E_GLPRT_PTC9522L(hw->port),
1071 pf->stat_offsets_loaded,
1072 &osd->tx_size_big, &nsd->tx_size_big);
1073
1074 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1075 pf->stat_offsets_loaded,
1076 &osd->rx_undersize, &nsd->rx_undersize);
1077 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1078 pf->stat_offsets_loaded,
1079 &osd->rx_fragments, &nsd->rx_fragments);
1080 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1081 pf->stat_offsets_loaded,
1082 &osd->rx_oversize, &nsd->rx_oversize);
1083 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1084 pf->stat_offsets_loaded,
1085 &osd->rx_jabber, &nsd->rx_jabber);
1086
1087 /* FDIR stats */
1088 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1089 pf->stat_offsets_loaded,
1090 &osd->fd_atr_match, &nsd->fd_atr_match);
1091 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1092 pf->stat_offsets_loaded,
1093 &osd->fd_sb_match, &nsd->fd_sb_match);
1094
1095 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1096 nsd->tx_lpi_status =
1097 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1098 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1099 nsd->rx_lpi_status =
1100 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1101 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1102 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1103 pf->stat_offsets_loaded,
1104 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1105 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1106 pf->stat_offsets_loaded,
1107 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1108
1109 pf->stat_offsets_loaded = true;
1110 }
1111
1112 /**
1113 * i40e_update_stats - Update the various statistics counters.
1114 * @vsi: the VSI to be updated
1115 *
1116 * Update the various stats for this VSI and its related entities.
1117 **/
1118 void i40e_update_stats(struct i40e_vsi *vsi)
1119 {
1120 struct i40e_pf *pf = vsi->back;
1121
1122 if (vsi == pf->vsi[pf->lan_vsi])
1123 i40e_update_pf_stats(pf);
1124
1125 i40e_update_vsi_stats(vsi);
1126 #ifdef I40E_FCOE
1127 i40e_update_fcoe_stats(vsi);
1128 #endif
1129 }
1130
1131 /**
1132 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1133 * @vsi: the VSI to be searched
1134 * @macaddr: the MAC address
1135 * @vlan: the vlan
1136 * @is_vf: make sure it's a vf filter, else doesn't matter
1137 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1138 *
1139 * Returns ptr to the filter object or NULL
1140 **/
1141 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1142 u8 *macaddr, s16 vlan,
1143 bool is_vf, bool is_netdev)
1144 {
1145 struct i40e_mac_filter *f;
1146
1147 if (!vsi || !macaddr)
1148 return NULL;
1149
1150 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1151 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1152 (vlan == f->vlan) &&
1153 (!is_vf || f->is_vf) &&
1154 (!is_netdev || f->is_netdev))
1155 return f;
1156 }
1157 return NULL;
1158 }
1159
1160 /**
1161 * i40e_find_mac - Find a mac addr in the macvlan filters list
1162 * @vsi: the VSI to be searched
1163 * @macaddr: the MAC address we are searching for
1164 * @is_vf: make sure it's a vf filter, else doesn't matter
1165 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1166 *
1167 * Returns the first filter with the provided MAC address or NULL if
1168 * MAC address was not found
1169 **/
1170 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1171 bool is_vf, bool is_netdev)
1172 {
1173 struct i40e_mac_filter *f;
1174
1175 if (!vsi || !macaddr)
1176 return NULL;
1177
1178 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1179 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1180 (!is_vf || f->is_vf) &&
1181 (!is_netdev || f->is_netdev))
1182 return f;
1183 }
1184 return NULL;
1185 }
1186
1187 /**
1188 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1189 * @vsi: the VSI to be searched
1190 *
1191 * Returns true if VSI is in vlan mode or false otherwise
1192 **/
1193 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1194 {
1195 struct i40e_mac_filter *f;
1196
1197 /* A vlan of -1 on every filter means the VSI is not in vlan mode,
1198 * so we have to walk the whole list to be sure
1199 */
1200 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1201 if (f->vlan >= 0)
1202 return true;
1203 }
1204
1205 return false;
1206 }
1207
1208 /**
1209 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1210 * @vsi: the VSI to be searched
1211 * @macaddr: the mac address to be filtered
1212 * @is_vf: true if it is a vf
1213 * @is_netdev: true if it is a netdev
1214 *
1215 * Goes through all the macvlan filters and adds a
1216 * macvlan filter for each unique vlan that already exists
1217 *
1218 * Returns first filter found on success, else NULL
1219 **/
1220 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1221 bool is_vf, bool is_netdev)
1222 {
1223 struct i40e_mac_filter *f;
1224
1225 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1226 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1227 is_vf, is_netdev)) {
1228 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1229 is_vf, is_netdev))
1230 return NULL;
1231 }
1232 }
1233
1234 return list_first_entry_or_null(&vsi->mac_filter_list,
1235 struct i40e_mac_filter, list);
1236 }
1237
1238 /**
1239 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1240 * @vsi: the PF Main VSI - inappropriate for any other VSI
1241 * @macaddr: the MAC address
1242 *
1243 * Some older firmware configurations set up a default promiscuous VLAN
1244 * filter that needs to be removed.
1245 **/
1246 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1247 {
1248 struct i40e_aqc_remove_macvlan_element_data element;
1249 struct i40e_pf *pf = vsi->back;
1250 i40e_status aq_ret;
1251
1252 /* Only appropriate for the PF main VSI */
1253 if (vsi->type != I40E_VSI_MAIN)
1254 return -EINVAL;
1255
1256 memset(&element, 0, sizeof(element));
1257 ether_addr_copy(element.mac_addr, macaddr);
1258 element.vlan_tag = 0;
1259 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1260 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1261 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1262 if (aq_ret)
1263 return -ENOENT;
1264
1265 return 0;
1266 }
1267
1268 /**
1269 * i40e_add_filter - Add a mac/vlan filter to the VSI
1270 * @vsi: the VSI to be searched
1271 * @macaddr: the MAC address
1272 * @vlan: the vlan
1273 * @is_vf: make sure it's a vf filter, else doesn't matter
1274 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1275 *
1276 * Returns ptr to the filter object or NULL when no memory available.
1277 **/
1278 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1279 u8 *macaddr, s16 vlan,
1280 bool is_vf, bool is_netdev)
1281 {
1282 struct i40e_mac_filter *f;
1283
1284 if (!vsi || !macaddr)
1285 return NULL;
1286
1287 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1288 if (!f) {
1289 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1290 if (!f)
1291 goto add_filter_out;
1292
1293 ether_addr_copy(f->macaddr, macaddr);
1294 f->vlan = vlan;
1295 f->changed = true;
1296
1297 INIT_LIST_HEAD(&f->list);
1298 list_add(&f->list, &vsi->mac_filter_list);
1299 }
1300
1301 /* increment counter and add a new flag if needed */
1302 if (is_vf) {
1303 if (!f->is_vf) {
1304 f->is_vf = true;
1305 f->counter++;
1306 }
1307 } else if (is_netdev) {
1308 if (!f->is_netdev) {
1309 f->is_netdev = true;
1310 f->counter++;
1311 }
1312 } else {
1313 f->counter++;
1314 }
1315
1316 /* changed tells sync_filters_subtask to
1317 * push the filter down to the firmware
1318 */
1319 if (f->changed) {
1320 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1321 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1322 }
1323
1324 add_filter_out:
1325 return f;
1326 }
1327
1328 /**
1329 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1330 * @vsi: the VSI to be searched
1331 * @macaddr: the MAC address
1332 * @vlan: the vlan
1333 * @is_vf: make sure it's a vf filter, else doesn't matter
1334 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1335 **/
1336 void i40e_del_filter(struct i40e_vsi *vsi,
1337 u8 *macaddr, s16 vlan,
1338 bool is_vf, bool is_netdev)
1339 {
1340 struct i40e_mac_filter *f;
1341
1342 if (!vsi || !macaddr)
1343 return;
1344
1345 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1346 if (!f || f->counter == 0)
1347 return;
1348
1349 if (is_vf) {
1350 if (f->is_vf) {
1351 f->is_vf = false;
1352 f->counter--;
1353 }
1354 } else if (is_netdev) {
1355 if (f->is_netdev) {
1356 f->is_netdev = false;
1357 f->counter--;
1358 }
1359 } else {
1360 /* make sure we don't remove a filter in use by vf or netdev */
1361 int min_f = 0;
1362 min_f += (f->is_vf ? 1 : 0);
1363 min_f += (f->is_netdev ? 1 : 0);
1364
1365 if (f->counter > min_f)
1366 f->counter--;
1367 }
1368
1369 /* counter == 0 tells sync_filters_subtask to
1370 * remove the filter from the firmware's list
1371 */
1372 if (f->counter == 0) {
1373 f->changed = true;
1374 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1375 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1376 }
1377 }
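
/* Usage sketch for the filter reference counting above (illustrative):
 *
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *	...
 *	i40e_del_filter(vsi, mac, I40E_VLAN_ANY, false, true);
 *
 * The add takes a netdev reference on the MAC for any VLAN; the matching
 * delete drops it. Only when the last holder (VF, netdev, or plain
 * caller) is gone does the counter hit zero, at which point the filter is
 * marked changed so the next i40e_sync_vsi_filters() pass removes it from
 * the firmware list.
 */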
1378
1379 /**
1380 * i40e_set_mac - NDO callback to set mac address
1381 * @netdev: network interface device structure
1382 * @p: pointer to an address structure
1383 *
1384 * Returns 0 on success, negative on failure
1385 **/
1386 #ifdef I40E_FCOE
1387 int i40e_set_mac(struct net_device *netdev, void *p)
1388 #else
1389 static int i40e_set_mac(struct net_device *netdev, void *p)
1390 #endif
1391 {
1392 struct i40e_netdev_priv *np = netdev_priv(netdev);
1393 struct i40e_vsi *vsi = np->vsi;
1394 struct i40e_pf *pf = vsi->back;
1395 struct i40e_hw *hw = &pf->hw;
1396 struct sockaddr *addr = p;
1397 struct i40e_mac_filter *f;
1398
1399 if (!is_valid_ether_addr(addr->sa_data))
1400 return -EADDRNOTAVAIL;
1401
1402 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1403 netdev_info(netdev, "already using mac address %pM\n",
1404 addr->sa_data);
1405 return 0;
1406 }
1407
1408 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1409 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1410 return -EADDRNOTAVAIL;
1411
1412 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1413 netdev_info(netdev, "returning to hw mac address %pM\n",
1414 hw->mac.addr);
1415 else
1416 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1417
1418 if (vsi->type == I40E_VSI_MAIN) {
1419 i40e_status ret;
1420 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1421 I40E_AQC_WRITE_TYPE_LAA_WOL,
1422 addr->sa_data, NULL);
1423 if (ret) {
1424 netdev_info(netdev,
1425 "Addr change for Main VSI failed: %d\n",
1426 ret);
1427 return -EADDRNOTAVAIL;
1428 }
1429 }
1430
1431 if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1432 struct i40e_aqc_remove_macvlan_element_data element;
1433
1434 memset(&element, 0, sizeof(element));
1435 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1436 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1437 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1438 } else {
1439 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1440 false, false);
1441 }
1442
1443 if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1444 struct i40e_aqc_add_macvlan_element_data element;
1445
1446 memset(&element, 0, sizeof(element));
1447 ether_addr_copy(element.mac_addr, hw->mac.addr);
1448 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1449 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1450 } else {
1451 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1452 false, false);
1453 if (f)
1454 f->is_laa = true;
1455 }
1456
1457 i40e_sync_vsi_filters(vsi);
1458 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1459
1460 return 0;
1461 }
1462
1463 /**
1464 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1465 * @vsi: the VSI being setup
1466 * @ctxt: VSI context structure
1467 * @enabled_tc: Enabled TCs bitmap
1468 * @is_add: True if called before Add VSI
1469 *
1470 * Setup VSI queue mapping for enabled traffic classes.
1471 **/
1472 #ifdef I40E_FCOE
1473 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1474 struct i40e_vsi_context *ctxt,
1475 u8 enabled_tc,
1476 bool is_add)
1477 #else
1478 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1479 struct i40e_vsi_context *ctxt,
1480 u8 enabled_tc,
1481 bool is_add)
1482 #endif
1483 {
1484 struct i40e_pf *pf = vsi->back;
1485 u16 sections = 0;
1486 u8 netdev_tc = 0;
1487 u16 numtc = 0;
1488 u16 qcount;
1489 u8 offset;
1490 u16 qmap;
1491 int i;
1492 u16 num_tc_qps = 0;
1493
1494 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1495 offset = 0;
1496
1497 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1498 /* Find numtc from enabled TC bitmap */
1499 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1500 if (enabled_tc & (1 << i)) /* TC is enabled */
1501 numtc++;
1502 }
1503 if (!numtc) {
1504 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1505 numtc = 1;
1506 }
1507 } else {
1508 /* At least TC0 is enabled in case of non-DCB case */
1509 numtc = 1;
1510 }
1511
1512 vsi->tc_config.numtc = numtc;
1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514 /* Number of queues per enabled TC */
1515 num_tc_qps = vsi->alloc_queue_pairs/numtc;
1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1517
1518 /* Setup queue offset/count for all TCs for given VSI */
1519 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1520 /* See if the given TC is enabled for the given VSI */
1521 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1522 int pow, num_qps;
1523
1524 switch (vsi->type) {
1525 case I40E_VSI_MAIN:
1526 qcount = min_t(int, pf->rss_size, num_tc_qps);
1527 break;
1528 #ifdef I40E_FCOE
1529 case I40E_VSI_FCOE:
1530 qcount = num_tc_qps;
1531 break;
1532 #endif
1533 case I40E_VSI_FDIR:
1534 case I40E_VSI_SRIOV:
1535 case I40E_VSI_VMDQ2:
1536 default:
1537 qcount = num_tc_qps;
1538 WARN_ON(i != 0);
1539 break;
1540 }
1541 vsi->tc_config.tc_info[i].qoffset = offset;
1542 vsi->tc_config.tc_info[i].qcount = qcount;
1543
1544 /* find the power-of-2 of the number of queue pairs */
1545 num_qps = qcount;
1546 pow = 0;
1547 while (num_qps && ((1 << pow) < qcount)) {
1548 pow++;
1549 num_qps >>= 1;
1550 }
1551
1552 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1553 qmap =
1554 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1555 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1556
1557 offset += qcount;
1558 } else {
1559 /* TC is not enabled so set the offset to
1560 * default queue and allocate one queue
1561 * for the given TC.
1562 */
1563 vsi->tc_config.tc_info[i].qoffset = 0;
1564 vsi->tc_config.tc_info[i].qcount = 1;
1565 vsi->tc_config.tc_info[i].netdev_tc = 0;
1566
1567 qmap = 0;
1568 }
1569 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1570 }
1571
1572 /* Set actual Tx/Rx queue pairs */
1573 vsi->num_queue_pairs = offset;
1574
1575 /* Scheduler section valid can only be set for ADD VSI */
1576 if (is_add) {
1577 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1578
1579 ctxt->info.up_enable_bits = enabled_tc;
1580 }
1581 if (vsi->type == I40E_VSI_SRIOV) {
1582 ctxt->info.mapping_flags |=
1583 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1584 for (i = 0; i < vsi->num_queue_pairs; i++)
1585 ctxt->info.queue_mapping[i] =
1586 cpu_to_le16(vsi->base_queue + i);
1587 } else {
1588 ctxt->info.mapping_flags |=
1589 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1590 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1591 }
1592 ctxt->info.valid_sections |= cpu_to_le16(sections);
1593 }
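
/* Worked example of the qmap encoding above (illustrative numbers): for
 * an enabled TC starting at queue offset 0 with qcount == 8, the loop
 * leaves pow == 3 (2^3 == 8), so
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * Each tc_mapping[] word therefore packs the TC's first queue index with
 * the log2 of its queue count, and a disabled TC is left as qmap == 0,
 * pointing one queue at the default offset.
 */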
1594
1595 /**
1596 * i40e_set_rx_mode - NDO callback to set the netdev filters
1597 * @netdev: network interface device structure
1598 **/
1599 #ifdef I40E_FCOE
1600 void i40e_set_rx_mode(struct net_device *netdev)
1601 #else
1602 static void i40e_set_rx_mode(struct net_device *netdev)
1603 #endif
1604 {
1605 struct i40e_netdev_priv *np = netdev_priv(netdev);
1606 struct i40e_mac_filter *f, *ftmp;
1607 struct i40e_vsi *vsi = np->vsi;
1608 struct netdev_hw_addr *uca;
1609 struct netdev_hw_addr *mca;
1610 struct netdev_hw_addr *ha;
1611
1612 /* add addr if not already in the filter list */
1613 netdev_for_each_uc_addr(uca, netdev) {
1614 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1615 if (i40e_is_vsi_in_vlan(vsi))
1616 i40e_put_mac_in_vlan(vsi, uca->addr,
1617 false, true);
1618 else
1619 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1620 false, true);
1621 }
1622 }
1623
1624 netdev_for_each_mc_addr(mca, netdev) {
1625 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1626 if (i40e_is_vsi_in_vlan(vsi))
1627 i40e_put_mac_in_vlan(vsi, mca->addr,
1628 false, true);
1629 else
1630 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1631 false, true);
1632 }
1633 }
1634
1635 /* remove filter if not in netdev list */
1636 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1637 bool found = false;
1638
1639 if (!f->is_netdev)
1640 continue;
1641
1642 if (is_multicast_ether_addr(f->macaddr)) {
1643 netdev_for_each_mc_addr(mca, netdev) {
1644 if (ether_addr_equal(mca->addr, f->macaddr)) {
1645 found = true;
1646 break;
1647 }
1648 }
1649 } else {
1650 netdev_for_each_uc_addr(uca, netdev) {
1651 if (ether_addr_equal(uca->addr, f->macaddr)) {
1652 found = true;
1653 break;
1654 }
1655 }
1656
1657 for_each_dev_addr(netdev, ha) {
1658 if (ether_addr_equal(ha->addr, f->macaddr)) {
1659 found = true;
1660 break;
1661 }
1662 }
1663 }
1664 if (!found)
1665 i40e_del_filter(
1666 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1667 }
1668
1669 /* check for other flag changes */
1670 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1671 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1672 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1673 }
1674 }
1675
1676 /**
1677 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1678 * @vsi: ptr to the VSI
1679 *
1680 * Push any outstanding VSI filter changes through the AdminQ.
1681 *
1682 * Returns 0 or error value
1683 **/
1684 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1685 {
1686 struct i40e_mac_filter *f, *ftmp;
1687 bool promisc_forced_on = false;
1688 bool add_happened = false;
1689 int filter_list_len = 0;
1690 u32 changed_flags = 0;
1691 i40e_status aq_ret = 0;
1692 struct i40e_pf *pf;
1693 int num_add = 0;
1694 int num_del = 0;
1695 u16 cmd_flags;
1696
1697 /* array-typed pointers, allocated with kcalloc below */
1698 struct i40e_aqc_add_macvlan_element_data *add_list;
1699 struct i40e_aqc_remove_macvlan_element_data *del_list;
1700
1701 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1702 usleep_range(1000, 2000);
1703 pf = vsi->back;
1704
1705 if (vsi->netdev) {
1706 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1707 vsi->current_netdev_flags = vsi->netdev->flags;
1708 }
1709
1710 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1711 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1712
1713 filter_list_len = pf->hw.aq.asq_buf_size /
1714 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1715 del_list = kcalloc(filter_list_len,
1716 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1717 GFP_KERNEL);
1718 if (!del_list)
1719 return -ENOMEM;
1720
1721 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1722 if (!f->changed)
1723 continue;
1724
1725 if (f->counter != 0)
1726 continue;
1727 f->changed = false;
1728 cmd_flags = 0;
1729
1730 /* add to delete list */
1731 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1732 del_list[num_del].vlan_tag =
1733 cpu_to_le16((u16)(f->vlan ==
1734 I40E_VLAN_ANY ? 0 : f->vlan));
1735
1736 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1737 del_list[num_del].flags = cmd_flags;
1738 num_del++;
1739
1740 /* unlink from filter list */
1741 list_del(&f->list);
1742 kfree(f);
1743
1744 /* flush a full buffer */
1745 if (num_del == filter_list_len) {
1746 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1747 vsi->seid, del_list, num_del,
1748 NULL);
1749 num_del = 0;
1750 memset(del_list, 0, sizeof(*del_list));
1751
1752 if (aq_ret &&
1753 pf->hw.aq.asq_last_status !=
1754 I40E_AQ_RC_ENOENT)
1755 dev_info(&pf->pdev->dev,
1756 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1757 aq_ret,
1758 pf->hw.aq.asq_last_status);
1759 }
1760 }
1761 if (num_del) {
1762 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1763 del_list, num_del, NULL);
1764 num_del = 0;
1765
1766 if (aq_ret &&
1767 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1768 dev_info(&pf->pdev->dev,
1769 "ignoring delete macvlan error, err %d, aq_err %d\n",
1770 aq_ret, pf->hw.aq.asq_last_status);
1771 }
1772
1773 kfree(del_list);
1774 del_list = NULL;
1775
1776 /* do all the adds now */
1777 filter_list_len = pf->hw.aq.asq_buf_size /
1778 sizeof(struct i40e_aqc_add_macvlan_element_data);
1779 add_list = kcalloc(filter_list_len,
1780 sizeof(struct i40e_aqc_add_macvlan_element_data),
1781 GFP_KERNEL);
1782 if (!add_list)
1783 return -ENOMEM;
1784
1785 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1786 if (!f->changed)
1787 continue;
1788
1789 if (f->counter == 0)
1790 continue;
1791 f->changed = false;
1792 add_happened = true;
1793 cmd_flags = 0;
1794
1795 /* add to add array */
1796 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1797 add_list[num_add].vlan_tag =
1798 cpu_to_le16(
1799 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1800 add_list[num_add].queue_number = 0;
1801
1802 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1803 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1804 num_add++;
1805
1806 /* flush a full buffer */
1807 if (num_add == filter_list_len) {
1808 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1809 add_list, num_add,
1810 NULL);
1811 num_add = 0;
1812
1813 if (aq_ret)
1814 break;
1815 memset(add_list, 0, sizeof(*add_list));
1816 }
1817 }
1818 if (num_add) {
1819 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1820 add_list, num_add, NULL);
1821 num_add = 0;
1822 }
1823 kfree(add_list);
1824 add_list = NULL;
1825
1826 if (add_happened && aq_ret &&
1827 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1828 dev_info(&pf->pdev->dev,
1829 "add filter failed, err %d, aq_err %d\n",
1830 aq_ret, pf->hw.aq.asq_last_status);
1831 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1832 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1833 &vsi->state)) {
1834 promisc_forced_on = true;
1835 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1836 &vsi->state);
1837 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1838 }
1839 }
1840 }
1841
1842 /* check for changes in promiscuous modes */
1843 if (changed_flags & IFF_ALLMULTI) {
1844 bool cur_multipromisc;
1845 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1846 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1847 vsi->seid,
1848 cur_multipromisc,
1849 NULL);
1850 if (aq_ret)
1851 dev_info(&pf->pdev->dev,
1852 "set multi promisc failed, err %d, aq_err %d\n",
1853 aq_ret, pf->hw.aq.asq_last_status);
1854 }
1855 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1856 bool cur_promisc;
1857 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1858 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1859 &vsi->state));
1860 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1861 vsi->seid,
1862 cur_promisc, NULL);
1863 if (aq_ret)
1864 dev_info(&pf->pdev->dev,
1865 "set uni promisc failed, err %d, aq_err %d\n",
1866 aq_ret, pf->hw.aq.asq_last_status);
1867 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1868 vsi->seid,
1869 cur_promisc, NULL);
1870 if (aq_ret)
1871 dev_info(&pf->pdev->dev,
1872 "set brdcast promisc failed, err %d, aq_err %d\n",
1873 aq_ret, pf->hw.aq.asq_last_status);
1874 }
1875
1876 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1877 return 0;
1878 }
1879
1880 /**
1881 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1882 * @pf: board private structure
1883 **/
1884 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1885 {
1886 int v;
1887
1888 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1889 return;
1890 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1891
1892 for (v = 0; v < pf->num_alloc_vsi; v++) {
1893 if (pf->vsi[v] &&
1894 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1895 i40e_sync_vsi_filters(pf->vsi[v]);
1896 }
1897 }
1898
1899 /**
1900 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1901 * @netdev: network interface device structure
1902 * @new_mtu: new value for maximum frame size
1903 *
1904 * Returns 0 on success, negative on failure
1905 **/
1906 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1907 {
1908 struct i40e_netdev_priv *np = netdev_priv(netdev);
1909 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1910 struct i40e_vsi *vsi = np->vsi;
1911
1912 /* MTU < 68 is an error and causes problems on some kernels */
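	/* e.g. the default 1500-byte MTU yields a 1522-byte max_frame:
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
	 */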
1913 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1914 return -EINVAL;
1915
1916 netdev_info(netdev, "changing MTU from %d to %d\n",
1917 netdev->mtu, new_mtu);
1918 netdev->mtu = new_mtu;
1919 if (netif_running(netdev))
1920 i40e_vsi_reinit_locked(vsi);
1921
1922 return 0;
1923 }
1924
1925 /**
1926 * i40e_ioctl - Access the hwtstamp interface
1927 * @netdev: network interface device structure
1928 * @ifr: interface request data
1929 * @cmd: ioctl command
1930 **/
1931 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1932 {
1933 struct i40e_netdev_priv *np = netdev_priv(netdev);
1934 struct i40e_pf *pf = np->vsi->back;
1935
1936 switch (cmd) {
1937 case SIOCGHWTSTAMP:
1938 return i40e_ptp_get_ts_config(pf, ifr);
1939 case SIOCSHWTSTAMP:
1940 return i40e_ptp_set_ts_config(pf, ifr);
1941 default:
1942 return -EOPNOTSUPP;
1943 }
1944 }
1945
1946 /**
1947 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1948 * @vsi: the vsi being adjusted
1949 **/
1950 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1951 {
1952 struct i40e_vsi_context ctxt;
1953 i40e_status ret;
1954
1955 if ((vsi->info.valid_sections &
1956 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1957 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1958 return; /* already enabled */
1959
1960 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1961 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1962 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1963
1964 ctxt.seid = vsi->seid;
1965 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1966 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1967 if (ret) {
1968 dev_info(&vsi->back->pdev->dev,
1969 "%s: update vsi failed, aq_err=%d\n",
1970 __func__, vsi->back->hw.aq.asq_last_status);
1971 }
1972 }
1973
1974 /**
1975 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1976 * @vsi: the vsi being adjusted
1977 **/
1978 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1979 {
1980 struct i40e_vsi_context ctxt;
1981 i40e_status ret;
1982
1983 if ((vsi->info.valid_sections &
1984 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1985 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1986 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1987 return; /* already disabled */
1988
1989 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1990 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1991 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1992
1993 ctxt.seid = vsi->seid;
1994 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1995 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1996 if (ret) {
1997 dev_info(&vsi->back->pdev->dev,
1998 "%s: update vsi failed, aq_err=%d\n",
1999 __func__, vsi->back->hw.aq.asq_last_status);
2000 }
2001 }
2002
2003 /**
2004 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2005 * @netdev: network interface to be adjusted
2006 * @features: netdev features to test if VLAN offload is enabled or not
2007 **/
2008 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2009 {
2010 struct i40e_netdev_priv *np = netdev_priv(netdev);
2011 struct i40e_vsi *vsi = np->vsi;
2012
2013 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2014 i40e_vlan_stripping_enable(vsi);
2015 else
2016 i40e_vlan_stripping_disable(vsi);
2017 }
2018
2019 /**
2020 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2021 * @vsi: the vsi being configured
2022 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2023 **/
2024 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2025 {
2026 struct i40e_mac_filter *f, *add_f;
2027 bool is_netdev, is_vf;
2028
2029 is_vf = (vsi->type == I40E_VSI_SRIOV);
2030 is_netdev = !!(vsi->netdev);
2031
2032 if (is_netdev) {
2033 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2034 is_vf, is_netdev);
2035 if (!add_f) {
2036 dev_info(&vsi->back->pdev->dev,
2037 "Could not add vlan filter %d for %pM\n",
2038 vid, vsi->netdev->dev_addr);
2039 return -ENOMEM;
2040 }
2041 }
2042
2043 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2044 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2045 if (!add_f) {
2046 dev_info(&vsi->back->pdev->dev,
2047 "Could not add vlan filter %d for %pM\n",
2048 vid, f->macaddr);
2049 return -ENOMEM;
2050 }
2051 }
2052
2053 	/* Now that a vlan tag is being added, check whether this is the first
2054 	 * tag (i.e. a catch-all -1 "tag" still exists) and if so replace the
2055 	 * -1 "tag" with 0, so we accept untagged plus the specified tagged
2056 	 * traffic (and not any tagged and untagged)
2057 */
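	/* e.g. adding vid 100 while the netdev MAC still carries the
	 * catch-all (-1) filter drops that filter and replaces it with a
	 * vid 0 (untagged-only) filter, alongside the vid 100 filter
	 * added above
	 */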
2058 if (vid > 0) {
2059 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2060 I40E_VLAN_ANY,
2061 is_vf, is_netdev)) {
2062 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2063 I40E_VLAN_ANY, is_vf, is_netdev);
2064 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2065 is_vf, is_netdev);
2066 if (!add_f) {
2067 dev_info(&vsi->back->pdev->dev,
2068 "Could not add filter 0 for %pM\n",
2069 vsi->netdev->dev_addr);
2070 return -ENOMEM;
2071 }
2072 }
2073 }
2074
2075 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2076 if (vid > 0 && !vsi->info.pvid) {
2077 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2078 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2079 is_vf, is_netdev)) {
2080 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2081 is_vf, is_netdev);
2082 add_f = i40e_add_filter(vsi, f->macaddr,
2083 0, is_vf, is_netdev);
2084 if (!add_f) {
2085 dev_info(&vsi->back->pdev->dev,
2086 "Could not add filter 0 for %pM\n",
2087 f->macaddr);
2088 return -ENOMEM;
2089 }
2090 }
2091 }
2092 }
2093
2094 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2095 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2096 return 0;
2097
2098 return i40e_sync_vsi_filters(vsi);
2099 }
2100
2101 /**
2102 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2103 * @vsi: the vsi being configured
2104 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2105 *
2106 * Return: 0 on success or negative otherwise
2107 **/
2108 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2109 {
2110 struct net_device *netdev = vsi->netdev;
2111 struct i40e_mac_filter *f, *add_f;
2112 bool is_vf, is_netdev;
2113 int filter_count = 0;
2114
2115 is_vf = (vsi->type == I40E_VSI_SRIOV);
2116 is_netdev = !!(netdev);
2117
2118 if (is_netdev)
2119 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2120
2121 list_for_each_entry(f, &vsi->mac_filter_list, list)
2122 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2123
2124 	/* go through all the filters for this VSI and if only a
2125 	 * vid == 0 filter remains it means there are no other vlan filters,
2126 	 * so vid 0 must be replaced with -1. From then on we accept any
2127 	 * traffic (with any tag present, or untagged)
2128 */
2129 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2130 if (is_netdev) {
2131 if (f->vlan &&
2132 ether_addr_equal(netdev->dev_addr, f->macaddr))
2133 filter_count++;
2134 }
2135
2136 if (f->vlan)
2137 filter_count++;
2138 }
2139
2140 if (!filter_count && is_netdev) {
2141 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2142 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2143 is_vf, is_netdev);
2144 if (!f) {
2145 dev_info(&vsi->back->pdev->dev,
2146 "Could not add filter %d for %pM\n",
2147 I40E_VLAN_ANY, netdev->dev_addr);
2148 return -ENOMEM;
2149 }
2150 }
2151
2152 if (!filter_count) {
2153 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2154 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2155 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2156 is_vf, is_netdev);
2157 if (!add_f) {
2158 dev_info(&vsi->back->pdev->dev,
2159 "Could not add filter %d for %pM\n",
2160 I40E_VLAN_ANY, f->macaddr);
2161 return -ENOMEM;
2162 }
2163 }
2164 }
2165
2166 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2167 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2168 return 0;
2169
2170 return i40e_sync_vsi_filters(vsi);
2171 }
2172
2173 /**
2174 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2175 * @netdev: network interface to be adjusted
2176 * @vid: vlan id to be added
2177 *
2178 * net_device_ops implementation for adding vlan ids
2179 **/
2180 #ifdef I40E_FCOE
2181 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2182 __always_unused __be16 proto, u16 vid)
2183 #else
2184 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2185 __always_unused __be16 proto, u16 vid)
2186 #endif
2187 {
2188 struct i40e_netdev_priv *np = netdev_priv(netdev);
2189 struct i40e_vsi *vsi = np->vsi;
2190 int ret = 0;
2191
2192 if (vid > 4095)
2193 return -EINVAL;
2194
2195 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2196
2197 /* If the network stack called us with vid = 0 then
2198 * it is asking to receive priority tagged packets with
2199 * vlan id 0. Our HW receives them by default when configured
2200 * to receive untagged packets so there is no need to add an
2201 * extra filter for vlan 0 tagged packets.
2202 */
2203 if (vid)
2204 ret = i40e_vsi_add_vlan(vsi, vid);
2205
2206 if (!ret && (vid < VLAN_N_VID))
2207 set_bit(vid, vsi->active_vlans);
2208
2209 return ret;
2210 }
2211
2212 /**
2213 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2214 * @netdev: network interface to be adjusted
2215 * @vid: vlan id to be removed
2216 *
2217 * net_device_ops implementation for removing vlan ids
2218 **/
2219 #ifdef I40E_FCOE
2220 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2221 __always_unused __be16 proto, u16 vid)
2222 #else
2223 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2224 __always_unused __be16 proto, u16 vid)
2225 #endif
2226 {
2227 struct i40e_netdev_priv *np = netdev_priv(netdev);
2228 struct i40e_vsi *vsi = np->vsi;
2229
2230 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2231
2232 	/* the return code is ignored as there is nothing a user
2233 	 * can do about a failure to remove, and a log message has
2234 	 * already been printed by the called function
2235 */
2236 i40e_vsi_kill_vlan(vsi, vid);
2237
2238 clear_bit(vid, vsi->active_vlans);
2239
2240 return 0;
2241 }
2242
2243 /**
2244 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2245 * @vsi: the vsi being brought back up
2246 **/
2247 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2248 {
2249 u16 vid;
2250
2251 if (!vsi->netdev)
2252 return;
2253
2254 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2255
2256 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2257 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2258 vid);
2259 }
2260
2261 /**
2262 * i40e_vsi_add_pvid - Add pvid for the VSI
2263 * @vsi: the vsi being adjusted
2264 * @vid: the vlan id to set as a PVID
2265 **/
2266 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2267 {
2268 struct i40e_vsi_context ctxt;
2269 i40e_status aq_ret;
2270
2271 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2272 vsi->info.pvid = cpu_to_le16(vid);
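	/* as the flag names suggest: accept tagged frames, insert the PVID
	 * on transmit and strip it on receive
	 */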
2273 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2274 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2275 I40E_AQ_VSI_PVLAN_EMOD_STR;
2276
2277 ctxt.seid = vsi->seid;
2278 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2279 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2280 if (aq_ret) {
2281 dev_info(&vsi->back->pdev->dev,
2282 "%s: update vsi failed, aq_err=%d\n",
2283 __func__, vsi->back->hw.aq.asq_last_status);
2284 return -ENOENT;
2285 }
2286
2287 return 0;
2288 }
2289
2290 /**
2291 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2292 * @vsi: the vsi being adjusted
2293 *
2294 * Just use the vlan stripping disable path to put it back to normal
2295 **/
2296 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2297 {
2298 i40e_vlan_stripping_disable(vsi);
2299
2300 vsi->info.pvid = 0;
2301 }
2302
2303 /**
2304 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2305 * @vsi: ptr to the VSI
2306 *
2307 * If this function returns with an error, then it's possible one or
2308 * more of the rings is populated (while the rest are not). It is the
2309 * callers duty to clean those orphaned rings.
2310 *
2311 * Return 0 on success, negative on failure
2312 **/
2313 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2314 {
2315 int i, err = 0;
2316
2317 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2318 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2319
2320 return err;
2321 }
2322
2323 /**
2324 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2325 * @vsi: ptr to the VSI
2326 *
2327 * Free VSI's transmit software resources
2328 **/
2329 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2330 {
2331 int i;
2332
2333 if (!vsi->tx_rings)
2334 return;
2335
2336 for (i = 0; i < vsi->num_queue_pairs; i++)
2337 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2338 i40e_free_tx_resources(vsi->tx_rings[i]);
2339 }
2340
2341 /**
2342 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
2343 * @vsi: ptr to the VSI
2344 *
2345 * If this function returns with an error, then it's possible one or
2346 * more of the rings is populated (while the rest are not). It is the
2347 * caller's duty to clean up those orphaned rings.
2348 *
2349 * Return 0 on success, negative on failure
2350 **/
2351 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2352 {
2353 int i, err = 0;
2354
2355 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2356 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2357 #ifdef I40E_FCOE
2358 i40e_fcoe_setup_ddp_resources(vsi);
2359 #endif
2360 return err;
2361 }
2362
2363 /**
2364 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2365 * @vsi: ptr to the VSI
2366 *
2367 * Free all receive software resources
2368 **/
2369 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2370 {
2371 int i;
2372
2373 if (!vsi->rx_rings)
2374 return;
2375
2376 for (i = 0; i < vsi->num_queue_pairs; i++)
2377 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2378 i40e_free_rx_resources(vsi->rx_rings[i]);
2379 #ifdef I40E_FCOE
2380 i40e_fcoe_free_ddp_resources(vsi);
2381 #endif
2382 }
2383
2384 /**
2385 * i40e_configure_tx_ring - Configure a transmit ring context and related state
2386 * @ring: The Tx ring to configure
2387 *
2388 * Configure the Tx descriptor ring in the HMC context.
2389 **/
2390 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2391 {
2392 struct i40e_vsi *vsi = ring->vsi;
2393 u16 pf_q = vsi->base_queue + ring->queue_index;
2394 struct i40e_hw *hw = &vsi->back->hw;
2395 struct i40e_hmc_obj_txq tx_ctx;
2396 i40e_status err = 0;
2397 u32 qtx_ctl = 0;
2398
2399 /* some ATR related tx ring init */
2400 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2401 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2402 ring->atr_count = 0;
2403 } else {
2404 ring->atr_sample_rate = 0;
2405 }
2406
2407 /* initialize XPS */
2408 if (ring->q_vector && ring->netdev &&
2409 vsi->tc_config.numtc <= 1 &&
2410 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2411 netif_set_xps_queue(ring->netdev,
2412 &ring->q_vector->affinity_mask,
2413 ring->queue_index);
2414
2415 /* clear the context structure first */
2416 memset(&tx_ctx, 0, sizeof(tx_ctx));
2417
2418 tx_ctx.new_context = 1;
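	/* the queue context takes the ring base address in 128-byte units */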
2419 tx_ctx.base = (ring->dma / 128);
2420 tx_ctx.qlen = ring->count;
2421 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2422 I40E_FLAG_FD_ATR_ENABLED));
2423 #ifdef I40E_FCOE
2424 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2425 #endif
2426 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2427 /* FDIR VSI tx ring can still use RS bit and writebacks */
2428 if (vsi->type != I40E_VSI_FDIR)
2429 tx_ctx.head_wb_ena = 1;
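	/* the head write-back area sits immediately after the last descriptor */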
2430 tx_ctx.head_wb_addr = ring->dma +
2431 (ring->count * sizeof(struct i40e_tx_desc));
2432
2433 /* As part of VSI creation/update, FW allocates certain
2434 * Tx arbitration queue sets for each TC enabled for
2435 * the VSI. The FW returns the handles to these queue
2436 * sets as part of the response buffer to Add VSI,
2437 * Update VSI, etc. AQ commands. It is expected that
2438 * these queue set handles be associated with the Tx
2439 * queues by the driver as part of the TX queue context
2440 * initialization. This has to be done regardless of
2441 * DCB as by default everything is mapped to TC0.
2442 */
2443 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2444 tx_ctx.rdylist_act = 0;
2445
2446 /* clear the context in the HMC */
2447 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2448 if (err) {
2449 dev_info(&vsi->back->pdev->dev,
2450 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2451 ring->queue_index, pf_q, err);
2452 return -ENOMEM;
2453 }
2454
2455 /* set the context in the HMC */
2456 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2457 if (err) {
2458 dev_info(&vsi->back->pdev->dev,
2459 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2460 ring->queue_index, pf_q, err);
2461 return -ENOMEM;
2462 }
2463
2464 /* Now associate this queue with this PCI function */
2465 if (vsi->type == I40E_VSI_VMDQ2)
2466 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2467 else
2468 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2469 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2470 I40E_QTX_CTL_PF_INDX_MASK);
2471 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2472 i40e_flush(hw);
2473
2474 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2475
2476 /* cache tail off for easier writes later */
2477 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2478
2479 return 0;
2480 }
2481
2482 /**
2483 * i40e_configure_rx_ring - Configure a receive ring context
2484 * @ring: The Rx ring to configure
2485 *
2486 * Configure the Rx descriptor ring in the HMC context.
2487 **/
2488 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2489 {
2490 struct i40e_vsi *vsi = ring->vsi;
2491 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2492 u16 pf_q = vsi->base_queue + ring->queue_index;
2493 struct i40e_hw *hw = &vsi->back->hw;
2494 struct i40e_hmc_obj_rxq rx_ctx;
2495 i40e_status err = 0;
2496
2497 ring->state = 0;
2498
2499 /* clear the context structure first */
2500 memset(&rx_ctx, 0, sizeof(rx_ctx));
2501
2502 ring->rx_buf_len = vsi->rx_buf_len;
2503 ring->rx_hdr_len = vsi->rx_hdr_len;
2504
2505 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2506 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2507
2508 rx_ctx.base = (ring->dma / 128);
2509 rx_ctx.qlen = ring->count;
2510
2511 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2512 set_ring_16byte_desc_enabled(ring);
2513 rx_ctx.dsize = 0;
2514 } else {
2515 rx_ctx.dsize = 1;
2516 }
2517
2518 rx_ctx.dtype = vsi->dtype;
2519 if (vsi->dtype) {
2520 set_ring_ps_enabled(ring);
2521 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2522 I40E_RX_SPLIT_IP |
2523 I40E_RX_SPLIT_TCP_UDP |
2524 I40E_RX_SPLIT_SCTP;
2525 } else {
2526 rx_ctx.hsplit_0 = 0;
2527 }
2528
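	/* cap the max receive frame at what a full buffer chain can hold */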
2529 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2530 (chain_len * ring->rx_buf_len));
2531 if (hw->revision_id == 0)
2532 rx_ctx.lrxqthresh = 0;
2533 else
2534 rx_ctx.lrxqthresh = 2;
2535 rx_ctx.crcstrip = 1;
2536 rx_ctx.l2tsel = 1;
2537 rx_ctx.showiv = 1;
2538 #ifdef I40E_FCOE
2539 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2540 #endif
2541 /* set the prefena field to 1 because the manual says to */
2542 rx_ctx.prefena = 1;
2543
2544 /* clear the context in the HMC */
2545 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2546 if (err) {
2547 dev_info(&vsi->back->pdev->dev,
2548 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2549 ring->queue_index, pf_q, err);
2550 return -ENOMEM;
2551 }
2552
2553 /* set the context in the HMC */
2554 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2555 if (err) {
2556 dev_info(&vsi->back->pdev->dev,
2557 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2558 ring->queue_index, pf_q, err);
2559 return -ENOMEM;
2560 }
2561
2562 /* cache tail for quicker writes, and clear the reg before use */
2563 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2564 writel(0, ring->tail);
2565
2566 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2567
2568 return 0;
2569 }
2570
2571 /**
2572 * i40e_vsi_configure_tx - Configure the VSI for Tx
2573 * @vsi: VSI structure describing this set of rings and resources
2574 *
2575 * Configure the Tx VSI for operation.
2576 **/
2577 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2578 {
2579 int err = 0;
2580 u16 i;
2581
2582 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2583 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2584
2585 return err;
2586 }
2587
2588 /**
2589 * i40e_vsi_configure_rx - Configure the VSI for Rx
2590 * @vsi: the VSI being configured
2591 *
2592 * Configure the Rx VSI for operation.
2593 **/
2594 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2595 {
2596 int err = 0;
2597 u16 i;
2598
2599 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2600 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2601 + ETH_FCS_LEN + VLAN_HLEN;
2602 else
2603 vsi->max_frame = I40E_RXBUFFER_2048;
2604
2605 /* figure out correct receive buffer length */
2606 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2607 I40E_FLAG_RX_PS_ENABLED)) {
2608 case I40E_FLAG_RX_1BUF_ENABLED:
2609 vsi->rx_hdr_len = 0;
2610 vsi->rx_buf_len = vsi->max_frame;
2611 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2612 break;
2613 case I40E_FLAG_RX_PS_ENABLED:
2614 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2615 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2616 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2617 break;
2618 default:
2619 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2620 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2621 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2622 break;
2623 }
2624
2625 #ifdef I40E_FCOE
2626 /* setup rx buffer for FCoE */
2627 if ((vsi->type == I40E_VSI_FCOE) &&
2628 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2629 vsi->rx_hdr_len = 0;
2630 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2631 vsi->max_frame = I40E_RXBUFFER_3072;
2632 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2633 }
2634
2635 #endif /* I40E_FCOE */
2636 /* round up for the chip's needs */
2637 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2638 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2639 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2640 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2641
2642 /* set up individual rings */
2643 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2644 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2645
2646 return err;
2647 }
2648
2649 /**
2650 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2651 * @vsi: ptr to the VSI
2652 **/
2653 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2654 {
2655 struct i40e_ring *tx_ring, *rx_ring;
2656 u16 qoffset, qcount;
2657 int i, n;
2658
2659 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2660 return;
2661
2662 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2663 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2664 continue;
2665
2666 qoffset = vsi->tc_config.tc_info[n].qoffset;
2667 qcount = vsi->tc_config.tc_info[n].qcount;
2668 for (i = qoffset; i < (qoffset + qcount); i++) {
2669 rx_ring = vsi->rx_rings[i];
2670 tx_ring = vsi->tx_rings[i];
2671 rx_ring->dcb_tc = n;
2672 tx_ring->dcb_tc = n;
2673 }
2674 }
2675 }
2676
2677 /**
2678 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2679 * @vsi: ptr to the VSI
2680 **/
2681 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2682 {
2683 if (vsi->netdev)
2684 i40e_set_rx_mode(vsi->netdev);
2685 }
2686
2687 /**
2688 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2689 * @vsi: Pointer to the targeted VSI
2690 *
2691 * This function replays the hlist on the hw where all the SB Flow Director
2692 * filters were saved.
2693 **/
2694 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2695 {
2696 struct i40e_fdir_filter *filter;
2697 struct i40e_pf *pf = vsi->back;
2698 struct hlist_node *node;
2699
2700 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2701 return;
2702
2703 hlist_for_each_entry_safe(filter, node,
2704 &pf->fdir_filter_list, fdir_node) {
2705 i40e_add_del_fdir(vsi, filter, true);
2706 }
2707 }
2708
2709 /**
2710 * i40e_vsi_configure - Set up the VSI for action
2711 * @vsi: the VSI being configured
2712 **/
2713 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2714 {
2715 int err;
2716
2717 i40e_set_vsi_rx_mode(vsi);
2718 i40e_restore_vlan(vsi);
2719 i40e_vsi_config_dcb_rings(vsi);
2720 err = i40e_vsi_configure_tx(vsi);
2721 if (!err)
2722 err = i40e_vsi_configure_rx(vsi);
2723
2724 return err;
2725 }
2726
2727 /**
2728 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2729 * @vsi: the VSI being configured
2730 **/
2731 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2732 {
2733 struct i40e_pf *pf = vsi->back;
2734 struct i40e_q_vector *q_vector;
2735 struct i40e_hw *hw = &pf->hw;
2736 u16 vector;
2737 int i, q;
2738 u32 val;
2739 u32 qp;
2740
2741 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2742 * and PFINT_LNKLSTn registers, e.g.:
2743 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2744 */
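	/* so the first queue vector (vector == 1) programs register index 0,
	 * hence the (vector - 1) indexing below
	 */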
2745 qp = vsi->base_queue;
2746 vector = vsi->base_vector;
2747 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2748 q_vector = vsi->q_vectors[i];
2749 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2750 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2751 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2752 q_vector->rx.itr);
2753 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2754 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2755 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2756 q_vector->tx.itr);
2757
2758 /* Linked list for the queuepairs assigned to this vector */
2759 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2760 for (q = 0; q < q_vector->num_ringpairs; q++) {
2761 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2762 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2763 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2764 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2765 (I40E_QUEUE_TYPE_TX
2766 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2767
2768 wr32(hw, I40E_QINT_RQCTL(qp), val);
2769
2770 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2771 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2772 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2773 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2774 (I40E_QUEUE_TYPE_RX
2775 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2776
2777 /* Terminate the linked list */
2778 if (q == (q_vector->num_ringpairs - 1))
2779 val |= (I40E_QUEUE_END_OF_LIST
2780 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2781
2782 wr32(hw, I40E_QINT_TQCTL(qp), val);
2783 qp++;
2784 }
2785 }
2786
2787 i40e_flush(hw);
2788 }
2789
2790 /**
2791 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2792 * @hw: ptr to the hardware info
2793 **/
2794 static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2795 {
2796 u32 val;
2797
2798 /* clear things first */
2799 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2800 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2801
2802 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2803 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2804 I40E_PFINT_ICR0_ENA_GRST_MASK |
2805 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2806 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2807 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2808 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2809 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2810 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2811
2812 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2813
2814 /* SW_ITR_IDX = 0, but don't change INTENA */
2815 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2816 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2817
2818 /* OTHER_ITR_IDX = 0 */
2819 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2820 }
2821
2822 /**
2823 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2824 * @vsi: the VSI being configured
2825 **/
2826 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2827 {
2828 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2829 struct i40e_pf *pf = vsi->back;
2830 struct i40e_hw *hw = &pf->hw;
2831 u32 val;
2832
2833 /* set the ITR configuration */
2834 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2835 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2836 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2837 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2838 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2839 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2840
2841 i40e_enable_misc_int_causes(hw);
2842
2843 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2844 wr32(hw, I40E_PFINT_LNKLST0, 0);
2845
2846 /* Associate the queue pair to the vector and enable the queue int */
2847 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2848 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2849 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2850
2851 wr32(hw, I40E_QINT_RQCTL(0), val);
2852
2853 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2854 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2855 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2856
2857 wr32(hw, I40E_QINT_TQCTL(0), val);
2858 i40e_flush(hw);
2859 }
2860
2861 /**
2862 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2863 * @pf: board private structure
2864 **/
2865 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2866 {
2867 struct i40e_hw *hw = &pf->hw;
2868
2869 wr32(hw, I40E_PFINT_DYN_CTL0,
2870 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2871 i40e_flush(hw);
2872 }
2873
2874 /**
2875 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2876 * @pf: board private structure
2877 **/
2878 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2879 {
2880 struct i40e_hw *hw = &pf->hw;
2881 u32 val;
2882
2883 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2884 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2885 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2886
2887 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2888 i40e_flush(hw);
2889 }
2890
2891 /**
2892 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2893 * @vsi: pointer to a vsi
2894 * @vector: enable a particular HW interrupt vector
2895 **/
2896 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2897 {
2898 struct i40e_pf *pf = vsi->back;
2899 struct i40e_hw *hw = &pf->hw;
2900 u32 val;
2901
2902 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2903 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2904 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2905 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2906 /* skip the flush */
2907 }
2908
2909 /**
2910 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2911 * @vsi: pointer to a vsi
2912 * @vector: disable a particular HW interrupt vector
2913 **/
2914 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2915 {
2916 struct i40e_pf *pf = vsi->back;
2917 struct i40e_hw *hw = &pf->hw;
2918 u32 val;
2919
2920 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2921 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2922 i40e_flush(hw);
2923 }
2924
2925 /**
2926 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2927 * @irq: interrupt number
2928 * @data: pointer to a q_vector
2929 **/
2930 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2931 {
2932 struct i40e_q_vector *q_vector = data;
2933
2934 if (!q_vector->tx.ring && !q_vector->rx.ring)
2935 return IRQ_HANDLED;
2936
2937 napi_schedule(&q_vector->napi);
2938
2939 return IRQ_HANDLED;
2940 }
2941
2942 /**
2943 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2944 * @vsi: the VSI being configured
2945 * @basename: name for the vector
2946 *
2947 * Allocates MSI-X vectors and requests interrupts from the kernel.
2948 **/
2949 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2950 {
2951 int q_vectors = vsi->num_q_vectors;
2952 struct i40e_pf *pf = vsi->back;
2953 int base = vsi->base_vector;
2954 int rx_int_idx = 0;
2955 int tx_int_idx = 0;
2956 int vector, err;
2957
2958 for (vector = 0; vector < q_vectors; vector++) {
2959 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2960
2961 if (q_vector->tx.ring && q_vector->rx.ring) {
2962 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2963 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2964 tx_int_idx++;
2965 } else if (q_vector->rx.ring) {
2966 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2967 "%s-%s-%d", basename, "rx", rx_int_idx++);
2968 } else if (q_vector->tx.ring) {
2969 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2970 "%s-%s-%d", basename, "tx", tx_int_idx++);
2971 } else {
2972 /* skip this unused q_vector */
2973 continue;
2974 }
2975 err = request_irq(pf->msix_entries[base + vector].vector,
2976 vsi->irq_handler,
2977 0,
2978 q_vector->name,
2979 q_vector);
2980 if (err) {
2981 dev_info(&pf->pdev->dev,
2982 "%s: request_irq failed, error: %d\n",
2983 __func__, err);
2984 goto free_queue_irqs;
2985 }
2986 /* assign the mask for this irq */
2987 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2988 &q_vector->affinity_mask);
2989 }
2990
2991 vsi->irqs_ready = true;
2992 return 0;
2993
2994 free_queue_irqs:
2995 while (vector) {
2996 vector--;
2997 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2998 NULL);
2999 free_irq(pf->msix_entries[base + vector].vector,
3000 &(vsi->q_vectors[vector]));
3001 }
3002 return err;
3003 }
3004
3005 /**
3006 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3007 * @vsi: the VSI being un-configured
3008 **/
3009 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3010 {
3011 struct i40e_pf *pf = vsi->back;
3012 struct i40e_hw *hw = &pf->hw;
3013 int base = vsi->base_vector;
3014 int i;
3015
3016 for (i = 0; i < vsi->num_queue_pairs; i++) {
3017 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3018 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3019 }
3020
3021 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3022 for (i = vsi->base_vector;
3023 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3024 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3025
3026 i40e_flush(hw);
3027 for (i = 0; i < vsi->num_q_vectors; i++)
3028 synchronize_irq(pf->msix_entries[i + base].vector);
3029 } else {
3030 /* Legacy and MSI mode - this stops all interrupt handling */
3031 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3032 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3033 i40e_flush(hw);
3034 synchronize_irq(pf->pdev->irq);
3035 }
3036 }
3037
3038 /**
3039 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3040 * @vsi: the VSI being configured
3041 **/
3042 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3043 {
3044 struct i40e_pf *pf = vsi->back;
3045 int i;
3046
3047 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3048 for (i = vsi->base_vector;
3049 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3050 i40e_irq_dynamic_enable(vsi, i);
3051 } else {
3052 i40e_irq_dynamic_enable_icr0(pf);
3053 }
3054
3055 i40e_flush(&pf->hw);
3056 return 0;
3057 }
3058
3059 /**
3060 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3061 * @pf: board private structure
3062 **/
3063 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3064 {
3065 /* Disable ICR 0 */
3066 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3067 i40e_flush(&pf->hw);
3068 }
3069
3070 /**
3071 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3072 * @irq: interrupt number
3073 * @data: pointer to a q_vector
3074 *
3075 * This is the handler used for all MSI/Legacy interrupts, and deals
3076 * with both queue and non-queue interrupts. This is also used in
3077 * MSIX mode to handle the non-queue interrupts.
3078 **/
3079 static irqreturn_t i40e_intr(int irq, void *data)
3080 {
3081 struct i40e_pf *pf = (struct i40e_pf *)data;
3082 struct i40e_hw *hw = &pf->hw;
3083 irqreturn_t ret = IRQ_NONE;
3084 u32 icr0, icr0_remaining;
3085 u32 val, ena_mask;
3086
3087 icr0 = rd32(hw, I40E_PFINT_ICR0);
3088 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3089
3090 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3091 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3092 goto enable_intr;
3093
3094 /* if interrupt but no bits showing, must be SWINT */
3095 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3096 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3097 pf->sw_int_count++;
3098
3099 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3100 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3101
3102 /* temporarily disable queue cause for NAPI processing */
3103 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3104 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3105 wr32(hw, I40E_QINT_RQCTL(0), qval);
3106
3107 qval = rd32(hw, I40E_QINT_TQCTL(0));
3108 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3109 wr32(hw, I40E_QINT_TQCTL(0), qval);
3110
3111 if (!test_bit(__I40E_DOWN, &pf->state))
3112 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3113 }
3114
3115 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3116 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3117 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3118 }
3119
3120 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3121 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3122 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3123 }
3124
3125 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3126 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3127 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3128 }
3129
3130 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3131 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3132 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3133 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3134 val = rd32(hw, I40E_GLGEN_RSTAT);
3135 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3136 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3137 if (val == I40E_RESET_CORER) {
3138 pf->corer_count++;
3139 } else if (val == I40E_RESET_GLOBR) {
3140 pf->globr_count++;
3141 } else if (val == I40E_RESET_EMPR) {
3142 pf->empr_count++;
3143 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
3144 }
3145 }
3146
3147 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3148 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3149 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3150 }
3151
3152 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3153 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3154
3155 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3156 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3157 i40e_ptp_tx_hwtstamp(pf);
3158 }
3159 }
3160
3161 /* If a critical error is pending we have no choice but to reset the
3162 * device.
3163 * Report and mask out any remaining unexpected interrupts.
3164 */
3165 icr0_remaining = icr0 & ena_mask;
3166 if (icr0_remaining) {
3167 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3168 icr0_remaining);
3169 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3170 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3171 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3172 dev_info(&pf->pdev->dev, "device will be reset\n");
3173 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3174 i40e_service_event_schedule(pf);
3175 }
3176 ena_mask &= ~icr0_remaining;
3177 }
3178 ret = IRQ_HANDLED;
3179
3180 enable_intr:
3181 /* re-enable interrupt causes */
3182 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3183 if (!test_bit(__I40E_DOWN, &pf->state)) {
3184 i40e_service_event_schedule(pf);
3185 i40e_irq_dynamic_enable_icr0(pf);
3186 }
3187
3188 return ret;
3189 }
3190
3191 /**
3192 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3193 * @tx_ring: tx ring to clean
3194 * @budget: how many cleans we're allowed
3195 *
3196 * Returns true if there's any budget left (i.e. the clean is finished)
3197 **/
3198 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3199 {
3200 struct i40e_vsi *vsi = tx_ring->vsi;
3201 u16 i = tx_ring->next_to_clean;
3202 struct i40e_tx_buffer *tx_buf;
3203 struct i40e_tx_desc *tx_desc;
3204
3205 tx_buf = &tx_ring->tx_bi[i];
3206 tx_desc = I40E_TX_DESC(tx_ring, i);
3207 i -= tx_ring->count;
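	/* track i relative to -count so the wrap check below is a simple
	 * test for zero
	 */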
3208
3209 do {
3210 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3211
3212 /* if next_to_watch is not set then there is no work pending */
3213 if (!eop_desc)
3214 break;
3215
3216 /* prevent any other reads prior to eop_desc */
3217 read_barrier_depends();
3218
3219 /* if the descriptor isn't done, no work yet to do */
3220 if (!(eop_desc->cmd_type_offset_bsz &
3221 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3222 break;
3223
3224 /* clear next_to_watch to prevent false hangs */
3225 tx_buf->next_to_watch = NULL;
3226
3227 tx_desc->buffer_addr = 0;
3228 tx_desc->cmd_type_offset_bsz = 0;
3229 /* move past filter desc */
3230 tx_buf++;
3231 tx_desc++;
3232 i++;
3233 if (unlikely(!i)) {
3234 i -= tx_ring->count;
3235 tx_buf = tx_ring->tx_bi;
3236 tx_desc = I40E_TX_DESC(tx_ring, 0);
3237 }
3238 /* unmap skb header data */
3239 dma_unmap_single(tx_ring->dev,
3240 dma_unmap_addr(tx_buf, dma),
3241 dma_unmap_len(tx_buf, len),
3242 DMA_TO_DEVICE);
3243 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3244 kfree(tx_buf->raw_buf);
3245
3246 tx_buf->raw_buf = NULL;
3247 tx_buf->tx_flags = 0;
3248 tx_buf->next_to_watch = NULL;
3249 dma_unmap_len_set(tx_buf, len, 0);
3250 tx_desc->buffer_addr = 0;
3251 tx_desc->cmd_type_offset_bsz = 0;
3252
3253 /* move us past the eop_desc for start of next FD desc */
3254 tx_buf++;
3255 tx_desc++;
3256 i++;
3257 if (unlikely(!i)) {
3258 i -= tx_ring->count;
3259 tx_buf = tx_ring->tx_bi;
3260 tx_desc = I40E_TX_DESC(tx_ring, 0);
3261 }
3262
3263 /* update budget accounting */
3264 budget--;
3265 } while (likely(budget));
3266
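	/* convert the relative index back to an absolute ring position */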
3267 i += tx_ring->count;
3268 tx_ring->next_to_clean = i;
3269
3270 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3271 i40e_irq_dynamic_enable(vsi,
3272 tx_ring->q_vector->v_idx + vsi->base_vector);
3273 }
3274 return budget > 0;
3275 }
3276
3277 /**
3278 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3279 * @irq: interrupt number
3280 * @data: pointer to a q_vector
3281 **/
3282 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3283 {
3284 struct i40e_q_vector *q_vector = data;
3285 struct i40e_vsi *vsi;
3286
3287 if (!q_vector->tx.ring)
3288 return IRQ_HANDLED;
3289
3290 vsi = q_vector->tx.ring->vsi;
3291 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3292
3293 return IRQ_HANDLED;
3294 }
3295
3296 /**
3297 * map_vector_to_qp - Assigns the queue pair to the vector
3298 * @vsi: the VSI being configured
3299 * @v_idx: vector index
3300 * @qp_idx: queue pair index
3301 **/
3302 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3303 {
3304 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3305 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3306 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3307
3308 tx_ring->q_vector = q_vector;
3309 tx_ring->next = q_vector->tx.ring;
3310 q_vector->tx.ring = tx_ring;
3311 q_vector->tx.count++;
3312
3313 rx_ring->q_vector = q_vector;
3314 rx_ring->next = q_vector->rx.ring;
3315 q_vector->rx.ring = rx_ring;
3316 q_vector->rx.count++;
3317 }
3318
3319 /**
3320 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3321 * @vsi: the VSI being configured
3322 *
3323 * This function maps descriptor rings to the queue-specific vectors
3324 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3325 * one vector per queue pair, but on a constrained vector budget, we
3326 * group the queue pairs as "efficiently" as possible.
3327 **/
3328 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3329 {
3330 int qp_remaining = vsi->num_queue_pairs;
3331 int q_vectors = vsi->num_q_vectors;
3332 int num_ringpairs;
3333 int v_start = 0;
3334 int qp_idx = 0;
3335
3336 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3337 * group them so there are multiple queues per vector.
3338 	 * It is also important to go through all the available vectors so
3339 	 * that, if we don't use them all, the remaining vectors are
3340 	 * cleared. This is especially important when decreasing the
3341 * number of queues in use.
3342 */
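	/* e.g. 10 queue pairs spread over 4 vectors end up grouped 3/3/2/2 */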
3343 for (; v_start < q_vectors; v_start++) {
3344 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3345
3346 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3347
3348 q_vector->num_ringpairs = num_ringpairs;
3349
3350 q_vector->rx.count = 0;
3351 q_vector->tx.count = 0;
3352 q_vector->rx.ring = NULL;
3353 q_vector->tx.ring = NULL;
3354
3355 while (num_ringpairs--) {
3356 map_vector_to_qp(vsi, v_start, qp_idx);
3357 qp_idx++;
3358 qp_remaining--;
3359 }
3360 }
3361 }
3362
3363 /**
3364 * i40e_vsi_request_irq - Request IRQ from the OS
3365 * @vsi: the VSI being configured
3366 * @basename: name for the vector
3367 **/
3368 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3369 {
3370 struct i40e_pf *pf = vsi->back;
3371 int err;
3372
3373 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3374 err = i40e_vsi_request_irq_msix(vsi, basename);
3375 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3376 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3377 pf->misc_int_name, pf);
3378 else
3379 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3380 pf->misc_int_name, pf);
3381
3382 if (err)
3383 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3384
3385 return err;
3386 }
3387
3388 #ifdef CONFIG_NET_POLL_CONTROLLER
3389 /**
3390 * i40e_netpoll - A Polling 'interrupt' handler
3391 * @netdev: network interface device structure
3392 *
3393 * This is used by netconsole to send skbs without having to re-enable
3394 * interrupts. It's not called while the normal interrupt routine is executing.
3395 **/
3396 #ifdef I40E_FCOE
3397 void i40e_netpoll(struct net_device *netdev)
3398 #else
3399 static void i40e_netpoll(struct net_device *netdev)
3400 #endif
3401 {
3402 struct i40e_netdev_priv *np = netdev_priv(netdev);
3403 struct i40e_vsi *vsi = np->vsi;
3404 struct i40e_pf *pf = vsi->back;
3405 int i;
3406
3407 /* if interface is down do nothing */
3408 if (test_bit(__I40E_DOWN, &vsi->state))
3409 return;
3410
3411 pf->flags |= I40E_FLAG_IN_NETPOLL;
3412 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3413 for (i = 0; i < vsi->num_q_vectors; i++)
3414 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3415 } else {
3416 i40e_intr(pf->pdev->irq, netdev);
3417 }
3418 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3419 }
3420 #endif
3421
3422 /**
3423 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3424 * @pf: the PF being configured
3425 * @pf_q: the PF queue
3426 * @enable: enable or disable state of the queue
3427 *
3428 * This routine will wait for the given Tx queue of the PF to reach the
3429 * enabled or disabled state.
3430 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3431 * multiple retries; else will return 0 in case of success.
3432 **/
3433 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3434 {
3435 int i;
3436 u32 tx_reg;
3437
3438 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3439 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3440 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3441 break;
3442
3443 udelay(10);
3444 }
3445 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3446 return -ETIMEDOUT;
3447
3448 return 0;
3449 }
3450
3451 /**
3452 * i40e_vsi_control_tx - Start or stop a VSI's rings
3453 * @vsi: the VSI being configured
3454 * @enable: start or stop the rings
3455 **/
3456 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3457 {
3458 struct i40e_pf *pf = vsi->back;
3459 struct i40e_hw *hw = &pf->hw;
3460 int i, j, pf_q, ret = 0;
3461 u32 tx_reg;
3462
3463 pf_q = vsi->base_queue;
3464 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3465
3466 /* warn the TX unit of coming changes */
3467 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3468 if (!enable)
3469 udelay(10);
3470
3471 for (j = 0; j < 50; j++) {
3472 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3473 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3474 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3475 break;
3476 usleep_range(1000, 2000);
3477 }
3478 /* Skip if the queue is already in the requested state */
3479 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3480 continue;
3481
3482 /* turn on/off the queue */
3483 if (enable) {
3484 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3485 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3486 } else {
3487 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3488 }
3489
3490 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3491
3492 /* wait for the change to finish */
3493 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3494 if (ret) {
3495 dev_info(&pf->pdev->dev,
3496 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3497 __func__, vsi->seid, pf_q,
3498 (enable ? "en" : "dis"));
3499 break;
3500 }
3501 }
3502
3503 if (hw->revision_id == 0)
3504 mdelay(50);
3505 return ret;
3506 }
3507
3508 /**
3509 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3510 * @pf: the PF being configured
3511 * @pf_q: the PF queue
3512 * @enable: enable or disable state of the queue
3513 *
3514 * This routine will wait for the given Rx queue of the PF to reach the
3515 * enabled or disabled state.
3516 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3517 * multiple retries; else will return 0 in case of success.
3518 **/
3519 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3520 {
3521 int i;
3522 u32 rx_reg;
3523
3524 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3525 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3526 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3527 break;
3528
3529 udelay(10);
3530 }
3531 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3532 return -ETIMEDOUT;
3533
3534 return 0;
3535 }
3536
3537 /**
3538 * i40e_vsi_control_rx - Start or stop a VSI's rings
3539 * @vsi: the VSI being configured
3540 * @enable: start or stop the rings
3541 **/
3542 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3543 {
3544 struct i40e_pf *pf = vsi->back;
3545 struct i40e_hw *hw = &pf->hw;
3546 int i, j, pf_q, ret = 0;
3547 u32 rx_reg;
3548
3549 pf_q = vsi->base_queue;
3550 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3551 for (j = 0; j < 50; j++) {
3552 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3553 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3554 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3555 break;
3556 usleep_range(1000, 2000);
3557 }
3558
3559 /* Skip if the queue is already in the requested state */
3560 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3561 continue;
3562
3563 /* turn on/off the queue */
3564 if (enable)
3565 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3566 else
3567 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3568 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3569
3570 /* wait for the change to finish */
3571 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3572 if (ret) {
3573 dev_info(&pf->pdev->dev,
3574 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3575 __func__, vsi->seid, pf_q,
3576 (enable ? "en" : "dis"));
3577 break;
3578 }
3579 }
3580
3581 return ret;
3582 }
3583
3584 /**
3585 * i40e_vsi_control_rings - Start or stop a VSI's rings
3586 * @vsi: the VSI being configured
3587 * @request: start or stop the rings
3588 **/
3589 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3590 {
3591 int ret = 0;
3592
3593 /* do rx first for enable and last for disable */
3594 if (request) {
3595 ret = i40e_vsi_control_rx(vsi, request);
3596 if (ret)
3597 return ret;
3598 ret = i40e_vsi_control_tx(vsi, request);
3599 } else {
3600 /* Ignore return value, we need to shutdown whatever we can */
3601 i40e_vsi_control_tx(vsi, request);
3602 i40e_vsi_control_rx(vsi, request);
3603 }
3604
3605 return ret;
3606 }
3607
3608 /**
3609 * i40e_vsi_free_irq - Free the irq association with the OS
3610 * @vsi: the VSI being configured
3611 **/
3612 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3613 {
3614 struct i40e_pf *pf = vsi->back;
3615 struct i40e_hw *hw = &pf->hw;
3616 int base = vsi->base_vector;
3617 u32 val, qp;
3618 int i;
3619
3620 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3621 if (!vsi->q_vectors)
3622 return;
3623
3624 if (!vsi->irqs_ready)
3625 return;
3626
3627 vsi->irqs_ready = false;
3628 for (i = 0; i < vsi->num_q_vectors; i++) {
3629 u16 vector = i + base;
3630
3631 /* free only the irqs that were actually requested */
3632 if (!vsi->q_vectors[i] ||
3633 !vsi->q_vectors[i]->num_ringpairs)
3634 continue;
3635
3636 /* clear the affinity_mask in the IRQ descriptor */
3637 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3638 NULL);
3639 free_irq(pf->msix_entries[vector].vector,
3640 vsi->q_vectors[i]);
3641
3642 /* Tear down the interrupt queue link list
3643 *
3644 * We know that they come in pairs and always
3645 * the Rx first, then the Tx. To clear the
3646 * link list, stick the EOL value into the
3647 * next_q field of the registers.
3648 */
3649 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3650 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3651 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3652 val |= I40E_QUEUE_END_OF_LIST
3653 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3654 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3655
3656 while (qp != I40E_QUEUE_END_OF_LIST) {
3657 u32 next;
3658
3659 val = rd32(hw, I40E_QINT_RQCTL(qp));
3660
3661 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3662 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3663 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3664 I40E_QINT_RQCTL_INTEVENT_MASK);
3665
3666 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3667 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3668
3669 wr32(hw, I40E_QINT_RQCTL(qp), val);
3670
3671 val = rd32(hw, I40E_QINT_TQCTL(qp));
3672
3673 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3674 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3675
3676 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3677 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3678 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3679 I40E_QINT_TQCTL_INTEVENT_MASK);
3680
3681 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3682 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3683
3684 wr32(hw, I40E_QINT_TQCTL(qp), val);
3685 qp = next;
3686 }
3687 }
3688 } else {
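/* legacy or MSI mode: a single vector was requested, so free
 * that one IRQ and unhook the lone queue pair from the
 * PFINT_LNKLST0 interrupt chain below
 */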
3689 free_irq(pf->pdev->irq, pf);
3690
3691 val = rd32(hw, I40E_PFINT_LNKLST0);
3692 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3693 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3694 val |= I40E_QUEUE_END_OF_LIST
3695 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3696 wr32(hw, I40E_PFINT_LNKLST0, val);
3697
3698 val = rd32(hw, I40E_QINT_RQCTL(qp));
3699 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3700 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3701 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3702 I40E_QINT_RQCTL_INTEVENT_MASK);
3703
3704 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3705 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3706
3707 wr32(hw, I40E_QINT_RQCTL(qp), val);
3708
3709 val = rd32(hw, I40E_QINT_TQCTL(qp));
3710
3711 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3712 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3713 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3714 I40E_QINT_TQCTL_INTEVENT_MASK);
3715
3716 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3717 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3718
3719 wr32(hw, I40E_QINT_TQCTL(qp), val);
3720 }
3721 }
3722
3723 /**
3724 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3725 * @vsi: the VSI being configured
3726 * @v_idx: Index of vector to be freed
3727 *
3728 * This function frees the memory allocated to the q_vector. In addition if
3729 * NAPI is enabled it will delete any references to the NAPI struct prior
3730 * to freeing the q_vector.
3731 **/
3732 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3733 {
3734 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3735 struct i40e_ring *ring;
3736
3737 if (!q_vector)
3738 return;
3739
3740 /* disassociate q_vector from rings */
3741 i40e_for_each_ring(ring, q_vector->tx)
3742 ring->q_vector = NULL;
3743
3744 i40e_for_each_ring(ring, q_vector->rx)
3745 ring->q_vector = NULL;
3746
3747 /* only VSI w/ an associated netdev is set up w/ NAPI */
3748 if (vsi->netdev)
3749 netif_napi_del(&q_vector->napi);
3750
3751 vsi->q_vectors[v_idx] = NULL;
3752
3753 kfree_rcu(q_vector, rcu);
3754 }
3755
3756 /**
3757 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3758 * @vsi: the VSI being un-configured
3759 *
3760 * This frees the memory allocated to the q_vectors and
3761 * deletes references to the NAPI struct.
3762 **/
3763 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3764 {
3765 int v_idx;
3766
3767 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3768 i40e_free_q_vector(vsi, v_idx);
3769 }
3770
3771 /**
3772 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3773 * @pf: board private structure
3774 **/
3775 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3776 {
3777 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3778 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3779 pci_disable_msix(pf->pdev);
3780 kfree(pf->msix_entries);
3781 pf->msix_entries = NULL;
3782 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3783 pci_disable_msi(pf->pdev);
3784 }
3785 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3786 }
3787
3788 /**
3789 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3790 * @pf: board private structure
3791 *
3792 * We go through and clear interrupt specific resources and reset the structure
3793 * to pre-load conditions
3794 **/
3795 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3796 {
3797 int i;
3798
3799 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3800 for (i = 0; i < pf->num_alloc_vsi; i++)
3801 if (pf->vsi[i])
3802 i40e_vsi_free_q_vectors(pf->vsi[i]);
3803 i40e_reset_interrupt_capability(pf);
3804 }
3805
3806 /**
3807 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3808 * @vsi: the VSI being configured
3809 **/
3810 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3811 {
3812 int q_idx;
3813
3814 if (!vsi->netdev)
3815 return;
3816
3817 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3818 napi_enable(&vsi->q_vectors[q_idx]->napi);
3819 }
3820
3821 /**
3822 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3823 * @vsi: the VSI being configured
3824 **/
3825 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3826 {
3827 int q_idx;
3828
3829 if (!vsi->netdev)
3830 return;
3831
3832 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3833 napi_disable(&vsi->q_vectors[q_idx]->napi);
3834 }
3835
3836 /**
3837 * i40e_vsi_close - Shut down a VSI
3838 * @vsi: the vsi to be quelled
3839 **/
3840 static void i40e_vsi_close(struct i40e_vsi *vsi)
3841 {
3842 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3843 i40e_down(vsi);
3844 i40e_vsi_free_irq(vsi);
3845 i40e_vsi_free_tx_resources(vsi);
3846 i40e_vsi_free_rx_resources(vsi);
3847 }
3848
3849 /**
3850 * i40e_quiesce_vsi - Pause a given VSI
3851 * @vsi: the VSI being paused
3852 **/
3853 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3854 {
3855 if (test_bit(__I40E_DOWN, &vsi->state))
3856 return;
3857
3858 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3859 if (vsi->netdev && netif_running(vsi->netdev)) {
3860 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3861 } else {
3862 i40e_vsi_close(vsi);
3863 }
3864 }
3865
3866 /**
3867 * i40e_unquiesce_vsi - Resume a given VSI
3868 * @vsi: the VSI being resumed
3869 **/
3870 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3871 {
3872 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3873 return;
3874
3875 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3876 if (vsi->netdev && netif_running(vsi->netdev))
3877 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3878 else
3879 i40e_vsi_open(vsi); /* this clears the DOWN bit */
3880 }
3881
3882 /**
3883 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3884 * @pf: the PF
3885 **/
3886 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3887 {
3888 int v;
3889
3890 for (v = 0; v < pf->num_alloc_vsi; v++) {
3891 if (pf->vsi[v])
3892 i40e_quiesce_vsi(pf->vsi[v]);
3893 }
3894 }
3895
3896 /**
3897 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3898 * @pf: the PF
3899 **/
3900 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3901 {
3902 int v;
3903
3904 for (v = 0; v < pf->num_alloc_vsi; v++) {
3905 if (pf->vsi[v])
3906 i40e_unquiesce_vsi(pf->vsi[v]);
3907 }
3908 }
3909
3910 /**
3911 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3912 * @dcbcfg: the corresponding DCBx configuration structure
3913 *
3914 * Return the number of TCs from given DCBx configuration
3915 **/
3916 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3917 {
3918 u8 num_tc = 0;
3919 int i;
3920
3921 /* Scan the ETS Config Priority Table to find
3922 * traffic class enabled for a given priority
3923 * and use the traffic class index to get the
3924 * number of traffic classes enabled
3925 */
3926 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3927 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3928 num_tc = dcbcfg->etscfg.prioritytable[i];
3929 }
3930
3931 /* Traffic class index starts from zero so
3932 * increment to return the actual count
3933 */
3934 return num_tc + 1;
3935 }
3936
3937 /**
3938 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3939 * @dcbcfg: the corresponding DCBx configuration structure
3940 *
3941 * Query the current DCB configuration and return the number of
3942 * traffic classes enabled from the given DCBX config
3943 **/
3944 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3945 {
3946 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3947 u8 enabled_tc = 1;
3948 u8 i;
3949
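/* enabled_tc starts with TC0 set; OR in one bit per detected TC,
 * e.g. num_tc == 3 yields a bitmap of 0x7 (TC0..TC2)
 */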
3950 for (i = 0; i < num_tc; i++)
3951 enabled_tc |= 1 << i;
3952
3953 return enabled_tc;
3954 }
3955
3956 /**
3957 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3958 * @pf: PF being queried
3959 *
3960 * Return number of traffic classes enabled for the given PF
3961 **/
3962 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3963 {
3964 struct i40e_hw *hw = &pf->hw;
3965 u8 i, enabled_tc;
3966 u8 num_tc = 0;
3967 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3968
3969 /* If DCB is not enabled then always in single TC */
3970 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3971 return 1;
3972
3973 /* MFP mode return count of enabled TCs for this PF */
3974 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3975 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3976 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3977 if (enabled_tc & (1 << i))
3978 num_tc++;
3979 }
3980 return num_tc;
3981 }
3982
3983 /* SFP mode will be enabled for all TCs on port */
3984 return i40e_dcb_get_num_tc(dcbcfg);
3985 }
3986
3987 /**
3988 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3989 * @pf: PF being queried
3990 *
3991 * Return a bitmap for first enabled traffic class for this PF.
3992 **/
3993 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3994 {
3995 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3996 u8 i = 0;
3997
3998 if (!enabled_tc)
3999 return 0x1; /* TC0 */
4000
4001 /* Find the first enabled TC */
4002 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4003 if (enabled_tc & (1 << i))
4004 break;
4005 }
4006
4007 return 1 << i;
4008 }
4009
4010 /**
4011 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4012 * @pf: PF being queried
4013 *
4014 * Return a bitmap for enabled traffic classes for this PF.
4015 **/
4016 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4017 {
4018 /* If DCB is not enabled for this PF then just return default TC */
4019 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4020 return i40e_pf_get_default_tc(pf);
4021
4022 /* MFP mode will have enabled TCs set by FW */
4023 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4024 return pf->hw.func_caps.enabled_tcmap;
4025
4026 /* SFP mode we want PF to be enabled for all TCs */
4027 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4028 }
4029
4030 /**
4031 * i40e_vsi_get_bw_info - Query VSI BW Information
4032 * @vsi: the VSI being queried
4033 *
4034 * Returns 0 on success, negative value on failure
4035 **/
4036 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4037 {
4038 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4039 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4040 struct i40e_pf *pf = vsi->back;
4041 struct i40e_hw *hw = &pf->hw;
4042 i40e_status aq_ret;
4043 u32 tc_bw_max;
4044 int i;
4045
4046 /* Get the VSI level BW configuration */
4047 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4048 if (aq_ret) {
4049 dev_info(&pf->pdev->dev,
4050 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
4051 aq_ret, pf->hw.aq.asq_last_status);
4052 return -EINVAL;
4053 }
4054
4055 /* Get the VSI level BW configuration per TC */
4056 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4057 NULL);
4058 if (aq_ret) {
4059 dev_info(&pf->pdev->dev,
4060 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
4061 aq_ret, pf->hw.aq.asq_last_status);
4062 return -EINVAL;
4063 }
4064
4065 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4066 dev_info(&pf->pdev->dev,
4067 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4068 bw_config.tc_valid_bits,
4069 bw_ets_config.tc_valid_bits);
4070 /* Still continuing */
4071 }
4072
4073 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4074 vsi->bw_max_quanta = bw_config.max_bw;
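/* tc_bw_max carries 4 bits per TC split across two 16-bit words;
 * stitch them into one 32-bit value before extracting the per-TC
 * maxima below
 */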
4075 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4076 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4077 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4078 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4079 vsi->bw_ets_limit_credits[i] =
4080 le16_to_cpu(bw_ets_config.credits[i]);
4081 /* 3 bits out of 4 for each TC */
4082 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4083 }
4084
4085 return 0;
4086 }
4087
4088 /**
4089 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4090 * @vsi: the VSI being configured
4091 * @enabled_tc: TC bitmap
4092 * @bw_credits: BW shared credits per TC
4093 *
4094 * Returns 0 on success, negative value on failure
4095 **/
4096 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4097 u8 *bw_share)
4098 {
4099 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4100 i40e_status aq_ret;
4101 int i;
4102
4103 bw_data.tc_valid_bits = enabled_tc;
4104 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4105 bw_data.tc_bw_credits[i] = bw_share[i];
4106
4107 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4108 NULL);
4109 if (aq_ret) {
4110 dev_info(&vsi->back->pdev->dev,
4111 "AQ command Config VSI BW allocation per TC failed = %d\n",
4112 vsi->back->hw.aq.asq_last_status);
4113 return -EINVAL;
4114 }
4115
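/* cache the per-TC queue set handles reported back in the command buffer */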
4116 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4117 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4118
4119 return 0;
4120 }
4121
4122 /**
4123 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4124 * @vsi: the VSI being configured
4125 * @enabled_tc: TC map to be enabled
4126 *
4127 **/
4128 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4129 {
4130 struct net_device *netdev = vsi->netdev;
4131 struct i40e_pf *pf = vsi->back;
4132 struct i40e_hw *hw = &pf->hw;
4133 u8 netdev_tc = 0;
4134 int i;
4135 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4136
4137 if (!netdev)
4138 return;
4139
4140 if (!enabled_tc) {
4141 netdev_reset_tc(netdev);
4142 return;
4143 }
4144
4145 /* Set up actual enabled TCs on the VSI */
4146 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4147 return;
4148
4149 /* set per TC queues for the VSI */
4150 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4151 /* Only set TC queues for enabled tcs
4152 *
4153 * e.g. for a VSI that has TC0 and TC3 enabled, the
4154 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
4155 * will set the numtc for netdev as 2 that will be
4156 * referenced by the netdev layer as TC 0 and 1.
4157 */
4158 if (vsi->tc_config.enabled_tc & (1 << i))
4159 netdev_set_tc_queue(netdev,
4160 vsi->tc_config.tc_info[i].netdev_tc,
4161 vsi->tc_config.tc_info[i].qcount,
4162 vsi->tc_config.tc_info[i].qoffset);
4163 }
4164
4165 /* Assign UP2TC map for the VSI */
4166 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4167 /* Get the actual TC# for the UP */
4168 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4169 /* Get the mapped netdev TC# for the UP */
4170 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4171 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4172 }
4173 }
4174
4175 /**
4176 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4177 * @vsi: the VSI being configured
4178 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4179 **/
4180 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4181 struct i40e_vsi_context *ctxt)
4182 {
4183 /* copy just the sections touched not the entire info
4184 * since not all sections are valid as returned by
4185 * update vsi params
4186 */
4187 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4188 memcpy(&vsi->info.queue_mapping,
4189 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4190 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4191 sizeof(vsi->info.tc_mapping));
4192 }
4193
4194 /**
4195 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4196 * @vsi: VSI to be configured
4197 * @enabled_tc: TC bitmap
4198 *
4199 * This configures a particular VSI for TCs that are mapped to the
4200 * given TC bitmap. It uses default bandwidth share for TCs across
4201 * VSIs to configure TC for a particular VSI.
4202 *
4203 * NOTE:
4204 * It is expected that the VSI queues have been quiesced before calling
4205 * this function.
4206 **/
4207 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4208 {
4209 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4210 struct i40e_vsi_context ctxt;
4211 int ret = 0;
4212 int i;
4213
4214 /* Return early if the requested TC map matches the existing one */
4215 if (vsi->tc_config.enabled_tc == enabled_tc)
4216 return ret;
4217
4218 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4219 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4220 if (enabled_tc & (1 << i))
4221 bw_share[i] = 1;
4222 }
4223
4224 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4225 if (ret) {
4226 dev_info(&vsi->back->pdev->dev,
4227 "Failed configuring TC map %d for VSI %d\n",
4228 enabled_tc, vsi->seid);
4229 goto out;
4230 }
4231
4232 /* Update Queue Pairs Mapping for currently enabled UPs */
4233 ctxt.seid = vsi->seid;
4234 ctxt.pf_num = vsi->back->hw.pf_id;
4235 ctxt.vf_num = 0;
4236 ctxt.uplink_seid = vsi->uplink_seid;
4237 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4238 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4239
4240 /* Update the VSI after updating the VSI queue-mapping information */
4241 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4242 if (ret) {
4243 dev_info(&vsi->back->pdev->dev,
4244 "update vsi failed, aq_err=%d\n",
4245 vsi->back->hw.aq.asq_last_status);
4246 goto out;
4247 }
4248 /* update the local VSI info with updated queue map */
4249 i40e_vsi_update_queue_map(vsi, &ctxt);
4250 vsi->info.valid_sections = 0;
4251
4252 /* Update current VSI BW information */
4253 ret = i40e_vsi_get_bw_info(vsi);
4254 if (ret) {
4255 dev_info(&vsi->back->pdev->dev,
4256 "Failed updating vsi bw info, aq_err=%d\n",
4257 vsi->back->hw.aq.asq_last_status);
4258 goto out;
4259 }
4260
4261 /* Update the netdev TC setup */
4262 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4263 out:
4264 return ret;
4265 }
4266
4267 /**
4268 * i40e_veb_config_tc - Configure TCs for given VEB
4269 * @veb: given VEB
4270 * @enabled_tc: TC bitmap
4271 *
4272 * Configures given TC bitmap for VEB (switching) element
4273 **/
4274 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4275 {
4276 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4277 struct i40e_pf *pf = veb->pf;
4278 int ret = 0;
4279 int i;
4280
4281 /* No TCs or already enabled TCs just return */
4282 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4283 return ret;
4284
4285 bw_data.tc_valid_bits = enabled_tc;
4286 /* bw_data.absolute_credits is not set (relative) */
4287
4288 /* Enable ETS TCs with equal BW Share for now */
4289 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4290 if (enabled_tc & (1 << i))
4291 bw_data.tc_bw_share_credits[i] = 1;
4292 }
4293
4294 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4295 &bw_data, NULL);
4296 if (ret) {
4297 dev_info(&pf->pdev->dev,
4298 "veb bw config failed, aq_err=%d\n",
4299 pf->hw.aq.asq_last_status);
4300 goto out;
4301 }
4302
4303 /* Update the BW information */
4304 ret = i40e_veb_get_bw_info(veb);
4305 if (ret) {
4306 dev_info(&pf->pdev->dev,
4307 "Failed getting veb bw config, aq_err=%d\n",
4308 pf->hw.aq.asq_last_status);
4309 }
4310
4311 out:
4312 return ret;
4313 }
4314
4315 #ifdef CONFIG_I40E_DCB
4316 /**
4317 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4318 * @pf: PF struct
4319 *
4320 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4321 * the caller has quiesced all the VSIs before calling
4322 * this function
4323 **/
4324 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4325 {
4326 u8 tc_map = 0;
4327 int ret;
4328 u8 v;
4329
4330 /* Enable the TCs available on PF to all VEBs */
4331 tc_map = i40e_pf_get_tc_map(pf);
4332 for (v = 0; v < I40E_MAX_VEB; v++) {
4333 if (!pf->veb[v])
4334 continue;
4335 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4336 if (ret) {
4337 dev_info(&pf->pdev->dev,
4338 "Failed configuring TC for VEB seid=%d\n",
4339 pf->veb[v]->seid);
4340 /* Keep going and try to configure the remaining components */
4341 }
4342 }
4343
4344 /* Update each VSI */
4345 for (v = 0; v < pf->num_alloc_vsi; v++) {
4346 if (!pf->vsi[v])
4347 continue;
4348
4349 /* - Enable all TCs for the LAN VSI
4350 #ifdef I40E_FCOE
4351 * - For FCoE VSI only enable the TC configured
4352 * as per the APP TLV
4353 #endif
4354 * - For all others keep them at TC0 for now
4355 */
4356 if (v == pf->lan_vsi)
4357 tc_map = i40e_pf_get_tc_map(pf);
4358 else
4359 tc_map = i40e_pf_get_default_tc(pf);
4360 #ifdef I40E_FCOE
4361 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4362 tc_map = i40e_get_fcoe_tc_map(pf);
4363 #endif /* #ifdef I40E_FCOE */
4364
4365 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4366 if (ret) {
4367 dev_info(&pf->pdev->dev,
4368 "Failed configuring TC for VSI seid=%d\n",
4369 pf->vsi[v]->seid);
4370 /* Keep going and try to configure the remaining components */
4371 } else {
4372 /* Re-configure VSI vectors based on updated TC map */
4373 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4374 if (pf->vsi[v]->netdev)
4375 i40e_dcbnl_set_all(pf->vsi[v]);
4376 }
4377 }
4378 }
4379
4380 /**
4381 * i40e_init_pf_dcb - Initialize DCB configuration
4382 * @pf: PF being configured
4383 *
4384 * Query the current DCB configuration and cache it
4385 * in the hardware structure
4386 **/
4387 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4388 {
4389 struct i40e_hw *hw = &pf->hw;
4390 int err = 0;
4391
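/* DCB is not set up here for NPAR (multi-partition) devices */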
4392 if (pf->hw.func_caps.npar_enable)
4393 goto out;
4394
4395 /* Get the initial DCB configuration */
4396 err = i40e_init_dcb(hw);
4397 if (!err) {
4398 /* Device/Function is not DCBX capable */
4399 if ((!hw->func_caps.dcb) ||
4400 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4401 dev_info(&pf->pdev->dev,
4402 "DCBX offload is not supported or is disabled for this PF.\n");
4403
4404 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4405 goto out;
4406
4407 } else {
4408 /* When status is not DISABLED, DCBX is running in FW */
4409 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4410 DCB_CAP_DCBX_VER_IEEE;
4411
4412 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4413 /* Enable DCB tagging only when more than one TC */
4414 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4415 pf->flags |= I40E_FLAG_DCB_ENABLED;
4416 }
4417 } else {
4418 dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
4419 pf->hw.aq.asq_last_status);
4420 }
4421
4422 out:
4423 return err;
4424 }
4425 #endif /* CONFIG_I40E_DCB */
4426 #define SPEED_SIZE 14
4427 #define FC_SIZE 8
4428 /**
4429 * i40e_print_link_message - print link up or down
4430 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false if it is down
4431 **/
4432 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4433 {
4434 char speed[SPEED_SIZE] = "Unknown";
4435 char fc[FC_SIZE] = "RX/TX";
4436
4437 if (!isup) {
4438 netdev_info(vsi->netdev, "NIC Link is Down\n");
4439 return;
4440 }
4441
4442 switch (vsi->back->hw.phy.link_info.link_speed) {
4443 case I40E_LINK_SPEED_40GB:
4444 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4445 break;
4446 case I40E_LINK_SPEED_10GB:
4447 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4448 break;
4449 case I40E_LINK_SPEED_1GB:
4450 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4451 break;
4452 default:
4453 break;
4454 }
4455
4456 switch (vsi->back->hw.fc.current_mode) {
4457 case I40E_FC_FULL:
4458 strlcpy(fc, "RX/TX", FC_SIZE);
4459 break;
4460 case I40E_FC_TX_PAUSE:
4461 strlcpy(fc, "TX", FC_SIZE);
4462 break;
4463 case I40E_FC_RX_PAUSE:
4464 strlcpy(fc, "RX", FC_SIZE);
4465 break;
4466 default:
4467 strlcpy(fc, "None", FC_SIZE);
4468 break;
4469 }
4470
4471 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4472 speed, fc);
4473 }
4474
4475 /**
4476 * i40e_up_complete - Finish the last steps of bringing up a connection
4477 * @vsi: the VSI being configured
4478 **/
4479 static int i40e_up_complete(struct i40e_vsi *vsi)
4480 {
4481 struct i40e_pf *pf = vsi->back;
4482 u8 set_fc_aq_fail = 0;
4483 int err;
4484
4485 /* force flow control off */
4486 i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
4487
4488 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4489 i40e_vsi_configure_msix(vsi);
4490 else
4491 i40e_configure_msi_and_legacy(vsi);
4492
4493 /* start rings */
4494 err = i40e_vsi_control_rings(vsi, true);
4495 if (err)
4496 return err;
4497
4498 clear_bit(__I40E_DOWN, &vsi->state);
4499 i40e_napi_enable_all(vsi);
4500 i40e_vsi_enable_irq(vsi);
4501
4502 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4503 (vsi->netdev)) {
4504 i40e_print_link_message(vsi, true);
4505 netif_tx_start_all_queues(vsi->netdev);
4506 netif_carrier_on(vsi->netdev);
4507 } else if (vsi->netdev) {
4508 i40e_print_link_message(vsi, false);
4509 /* need to check for qualified module here*/
4510 if ((pf->hw.phy.link_info.link_info &
4511 I40E_AQ_MEDIA_AVAILABLE) &&
4512 (!(pf->hw.phy.link_info.an_info &
4513 I40E_AQ_QUALIFIED_MODULE)))
4514 netdev_err(vsi->netdev,
4515 "The driver failed to link because an unqualified module was detected.\n");
4516 }
4517
4518 /* replay FDIR SB filters */
4519 if (vsi->type == I40E_VSI_FDIR) {
4520 /* reset fd counters */
4521 pf->fd_add_err = pf->fd_atr_cnt = 0;
4522 if (pf->fd_tcp_rule > 0) {
4523 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4524 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4525 pf->fd_tcp_rule = 0;
4526 }
4527 i40e_fdir_filter_restore(vsi);
4528 }
4529 i40e_service_event_schedule(pf);
4530
4531 return 0;
4532 }
4533
4534 /**
4535 * i40e_vsi_reinit_locked - Reset the VSI
4536 * @vsi: the VSI being configured
4537 *
4538 * Rebuild the ring structs after some configuration
4539 * has changed, e.g. MTU size.
4540 **/
4541 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4542 {
4543 struct i40e_pf *pf = vsi->back;
4544
4545 WARN_ON(in_interrupt());
4546 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4547 usleep_range(1000, 2000);
4548 i40e_down(vsi);
4549
4550 /* Give a VF some time to respond to the reset. The
4551 * two second wait is based upon the watchdog cycle in
4552 * the VF driver.
4553 */
4554 if (vsi->type == I40E_VSI_SRIOV)
4555 msleep(2000);
4556 i40e_up(vsi);
4557 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4558 }
4559
4560 /**
4561 * i40e_up - Bring the connection back up after being down
4562 * @vsi: the VSI being configured
4563 **/
4564 int i40e_up(struct i40e_vsi *vsi)
4565 {
4566 int err;
4567
4568 err = i40e_vsi_configure(vsi);
4569 if (!err)
4570 err = i40e_up_complete(vsi);
4571
4572 return err;
4573 }
4574
4575 /**
4576 * i40e_down - Shutdown the connection processing
4577 * @vsi: the VSI being stopped
4578 **/
4579 void i40e_down(struct i40e_vsi *vsi)
4580 {
4581 int i;
4582
4583 /* It is assumed that the caller of this function
4584 * sets the vsi->state __I40E_DOWN bit.
4585 */
4586 if (vsi->netdev) {
4587 netif_carrier_off(vsi->netdev);
4588 netif_tx_disable(vsi->netdev);
4589 }
4590 i40e_vsi_disable_irq(vsi);
4591 i40e_vsi_control_rings(vsi, false);
4592 i40e_napi_disable_all(vsi);
4593
4594 for (i = 0; i < vsi->num_queue_pairs; i++) {
4595 i40e_clean_tx_ring(vsi->tx_rings[i]);
4596 i40e_clean_rx_ring(vsi->rx_rings[i]);
4597 }
4598 }
4599
4600 /**
4601 * i40e_setup_tc - configure multiple traffic classes
4602 * @netdev: net device to configure
4603 * @tc: number of traffic classes to enable
4604 **/
4605 #ifdef I40E_FCOE
4606 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4607 #else
4608 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4609 #endif
4610 {
4611 struct i40e_netdev_priv *np = netdev_priv(netdev);
4612 struct i40e_vsi *vsi = np->vsi;
4613 struct i40e_pf *pf = vsi->back;
4614 u8 enabled_tc = 0;
4615 int ret = -EINVAL;
4616 int i;
4617
4618 /* Check if DCB enabled to continue */
4619 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4620 netdev_info(netdev, "DCB is not enabled for adapter\n");
4621 goto exit;
4622 }
4623
4624 /* Check if MFP enabled */
4625 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4626 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4627 goto exit;
4628 }
4629
4630 /* Check whether tc count is within enabled limit */
4631 if (tc > i40e_pf_get_num_tc(pf)) {
4632 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4633 goto exit;
4634 }
4635
4636 /* Generate TC map for number of tc requested */
4637 for (i = 0; i < tc; i++)
4638 enabled_tc |= (1 << i);
4639
4640 /* Requesting same TC configuration as already enabled */
4641 if (enabled_tc == vsi->tc_config.enabled_tc)
4642 return 0;
4643
4644 /* Quiesce VSI queues */
4645 i40e_quiesce_vsi(vsi);
4646
4647 /* Configure VSI for enabled TCs */
4648 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4649 if (ret) {
4650 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4651 vsi->seid);
4652 goto exit;
4653 }
4654
4655 /* Unquiesce VSI */
4656 i40e_unquiesce_vsi(vsi);
4657
4658 exit:
4659 return ret;
4660 }
4661
4662 /**
4663 * i40e_open - Called when a network interface is made active
4664 * @netdev: network interface device structure
4665 *
4666 * The open entry point is called when a network interface is made
4667 * active by the system (IFF_UP). At this point all resources needed
4668 * for transmit and receive operations are allocated, the interrupt
4669 * handler is registered with the OS, the netdev watchdog subtask is
4670 * enabled, and the stack is notified that the interface is ready.
4671 *
4672 * Returns 0 on success, negative value on failure
4673 **/
4674 #ifdef I40E_FCOE
4675 int i40e_open(struct net_device *netdev)
4676 #else
4677 static int i40e_open(struct net_device *netdev)
4678 #endif
4679 {
4680 struct i40e_netdev_priv *np = netdev_priv(netdev);
4681 struct i40e_vsi *vsi = np->vsi;
4682 struct i40e_pf *pf = vsi->back;
4683 int err;
4684
4685 /* disallow open during test or if eeprom is broken */
4686 if (test_bit(__I40E_TESTING, &pf->state) ||
4687 test_bit(__I40E_BAD_EEPROM, &pf->state))
4688 return -EBUSY;
4689
4690 netif_carrier_off(netdev);
4691
4692 err = i40e_vsi_open(vsi);
4693 if (err)
4694 return err;
4695
4696 /* configure global TSO hardware offload settings */
4697 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4698 TCP_FLAG_FIN) >> 16);
4699 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4700 TCP_FLAG_FIN |
4701 TCP_FLAG_CWR) >> 16);
4702 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4703
4704 #ifdef CONFIG_I40E_VXLAN
4705 vxlan_get_rx_port(netdev);
4706 #endif
4707
4708 return 0;
4709 }
4710
4711 /**
4712 * i40e_vsi_open - Finish initialization of a VSI and bring it up
4713 * @vsi: the VSI to open
4714 *
4715 * Finish initialization of the VSI.
4716 *
4717 * Returns 0 on success, negative value on failure
4718 **/
4719 int i40e_vsi_open(struct i40e_vsi *vsi)
4720 {
4721 struct i40e_pf *pf = vsi->back;
4722 char int_name[IFNAMSIZ];
4723 int err;
4724
4725 /* allocate descriptors */
4726 err = i40e_vsi_setup_tx_resources(vsi);
4727 if (err)
4728 goto err_setup_tx;
4729 err = i40e_vsi_setup_rx_resources(vsi);
4730 if (err)
4731 goto err_setup_rx;
4732
4733 err = i40e_vsi_configure(vsi);
4734 if (err)
4735 goto err_setup_rx;
4736
4737 if (vsi->netdev) {
4738 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4739 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4740 err = i40e_vsi_request_irq(vsi, int_name);
4741 if (err)
4742 goto err_setup_rx;
4743
4744 /* Notify the stack of the actual queue counts. */
4745 err = netif_set_real_num_tx_queues(vsi->netdev,
4746 vsi->num_queue_pairs);
4747 if (err)
4748 goto err_set_queues;
4749
4750 err = netif_set_real_num_rx_queues(vsi->netdev,
4751 vsi->num_queue_pairs);
4752 if (err)
4753 goto err_set_queues;
4754
4755 } else if (vsi->type == I40E_VSI_FDIR) {
4756 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4757 dev_driver_string(&pf->pdev->dev));
4758 err = i40e_vsi_request_irq(vsi, int_name);
4759 } else {
4760 err = -EINVAL;
4761 goto err_setup_rx;
4762 }
4763
4764 err = i40e_up_complete(vsi);
4765 if (err)
4766 goto err_up_complete;
4767
4768 return 0;
4769
4770 err_up_complete:
4771 i40e_down(vsi);
4772 err_set_queues:
4773 i40e_vsi_free_irq(vsi);
4774 err_setup_rx:
4775 i40e_vsi_free_rx_resources(vsi);
4776 err_setup_tx:
4777 i40e_vsi_free_tx_resources(vsi);
4778 if (vsi == pf->vsi[pf->lan_vsi])
4779 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4780
4781 return err;
4782 }
4783
4784 /**
4785 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4786 * @pf: Pointer to pf
4787 *
4788 * This function destroys the hlist where all the Flow Director
4789 * filters were saved.
4790 **/
4791 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4792 {
4793 struct i40e_fdir_filter *filter;
4794 struct hlist_node *node2;
4795
4796 hlist_for_each_entry_safe(filter, node2,
4797 &pf->fdir_filter_list, fdir_node) {
4798 hlist_del(&filter->fdir_node);
4799 kfree(filter);
4800 }
4801 pf->fdir_pf_active_filters = 0;
4802 }
4803
4804 /**
4805 * i40e_close - Disables a network interface
4806 * @netdev: network interface device structure
4807 *
4808 * The close entry point is called when an interface is de-activated
4809 * by the OS. The hardware is still under the driver's control, but
4810 * this netdev interface is disabled.
4811 *
4812 * Returns 0, this is not allowed to fail
4813 **/
4814 #ifdef I40E_FCOE
4815 int i40e_close(struct net_device *netdev)
4816 #else
4817 static int i40e_close(struct net_device *netdev)
4818 #endif
4819 {
4820 struct i40e_netdev_priv *np = netdev_priv(netdev);
4821 struct i40e_vsi *vsi = np->vsi;
4822
4823 i40e_vsi_close(vsi);
4824
4825 return 0;
4826 }
4827
4828 /**
4829 * i40e_do_reset - Start a PF or Core Reset sequence
4830 * @pf: board private structure
4831 * @reset_flags: which reset is requested
4832 *
4833 * The essential difference in resets is that the PF Reset
4834 * doesn't clear the packet buffers, doesn't reset the PE
4835 * firmware, and doesn't bother the other PFs on the chip.
4836 **/
4837 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4838 {
4839 u32 val;
4840
4841 WARN_ON(in_interrupt());
4842
4843 if (i40e_check_asq_alive(&pf->hw))
4844 i40e_vc_notify_reset(pf);
4845
4846 /* do the biggest reset indicated */
4847 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4848
4849 /* Request a Global Reset
4850 *
4851 * This will start the chip's countdown to the actual full
4852 * chip reset event, and a warning interrupt to be sent
4853 * to all PFs, including the requestor. Our handler
4854 * for the warning interrupt will deal with the shutdown
4855 * and recovery of the switch setup.
4856 */
4857 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
4858 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4859 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4860 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4861
4862 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4863
4864 /* Request a Core Reset
4865 *
4866 * Same as Global Reset, except does *not* include the MAC/PHY
4867 */
4868 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
4869 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4870 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4871 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4872 i40e_flush(&pf->hw);
4873
4874 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
4875
4876 /* Request a Firmware Reset
4877 *
4878 * Same as Global reset, plus restarting the
4879 * embedded firmware engine.
4880 */
4881 /* enable EMP Reset */
4882 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
4883 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
4884 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
4885
4886 /* force the reset */
4887 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4888 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
4889 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4890 i40e_flush(&pf->hw);
4891
4892 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4893
4894 /* Request a PF Reset
4895 *
4896 * Resets only the PF-specific registers
4897 *
4898 * This goes directly to the tear-down and rebuild of
4899 * the switch, since we need to do all the recovery as
4900 * for the Core Reset.
4901 */
4902 dev_dbg(&pf->pdev->dev, "PFR requested\n");
4903 i40e_handle_reset_warning(pf);
4904
4905 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4906 int v;
4907
4908 /* Find the VSI(s) that requested a re-init */
4909 dev_info(&pf->pdev->dev,
4910 "VSI reinit requested\n");
4911 for (v = 0; v < pf->num_alloc_vsi; v++) {
4912 struct i40e_vsi *vsi = pf->vsi[v];
4913 if (vsi != NULL &&
4914 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4915 i40e_vsi_reinit_locked(pf->vsi[v]);
4916 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4917 }
4918 }
4919
4920 /* no further action needed, so return now */
4921 return;
4922 } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
4923 int v;
4924
4925 /* Find the VSI(s) that needs to be brought down */
4926 dev_info(&pf->pdev->dev, "VSI down requested\n");
4927 for (v = 0; v < pf->num_alloc_vsi; v++) {
4928 struct i40e_vsi *vsi = pf->vsi[v];
4929 if (vsi != NULL &&
4930 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
4931 set_bit(__I40E_DOWN, &vsi->state);
4932 i40e_down(vsi);
4933 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
4934 }
4935 }
4936
4937 /* no further action needed, so return now */
4938 return;
4939 } else {
4940 dev_info(&pf->pdev->dev,
4941 "bad reset request 0x%08x\n", reset_flags);
4942 return;
4943 }
4944 }
4945
4946 #ifdef CONFIG_I40E_DCB
4947 /**
4948 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
4949 * @pf: board private structure
4950 * @old_cfg: current DCB config
4951 * @new_cfg: new DCB config
4952 **/
4953 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4954 struct i40e_dcbx_config *old_cfg,
4955 struct i40e_dcbx_config *new_cfg)
4956 {
4957 bool need_reconfig = false;
4958
4959 /* Check if ETS configuration has changed */
4960 if (memcmp(&new_cfg->etscfg,
4961 &old_cfg->etscfg,
4962 sizeof(new_cfg->etscfg))) {
4963 /* If Priority Table has changed reconfig is needed */
4964 if (memcmp(&new_cfg->etscfg.prioritytable,
4965 &old_cfg->etscfg.prioritytable,
4966 sizeof(new_cfg->etscfg.prioritytable))) {
4967 need_reconfig = true;
4968 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4969 }
4970
4971 if (memcmp(&new_cfg->etscfg.tcbwtable,
4972 &old_cfg->etscfg.tcbwtable,
4973 sizeof(new_cfg->etscfg.tcbwtable)))
4974 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4975
4976 if (memcmp(&new_cfg->etscfg.tsatable,
4977 &old_cfg->etscfg.tsatable,
4978 sizeof(new_cfg->etscfg.tsatable)))
4979 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4980 }
4981
4982 /* Check if PFC configuration has changed */
4983 if (memcmp(&new_cfg->pfc,
4984 &old_cfg->pfc,
4985 sizeof(new_cfg->pfc))) {
4986 need_reconfig = true;
4987 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4988 }
4989
4990 /* Check if APP Table has changed */
4991 if (memcmp(&new_cfg->app,
4992 &old_cfg->app,
4993 sizeof(new_cfg->app))) {
4994 need_reconfig = true;
4995 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
4996 }
4997
4998 return need_reconfig;
4999 }
5000
5001 /**
5002 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5003 * @pf: board private structure
5004 * @e: event info posted on ARQ
5005 **/
5006 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5007 struct i40e_arq_event_info *e)
5008 {
5009 struct i40e_aqc_lldp_get_mib *mib =
5010 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5011 struct i40e_hw *hw = &pf->hw;
5012 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
5013 struct i40e_dcbx_config tmp_dcbx_cfg;
5014 bool need_reconfig = false;
5015 int ret = 0;
5016 u8 type;
5017
5018 /* Not DCB capable or capability disabled */
5019 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5020 return ret;
5021
5022 /* Ignore if event is not for Nearest Bridge */
5023 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5024 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5025 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5026 return ret;
5027
5028 /* Check MIB Type and return if event for Remote MIB update */
5029 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5030 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5031 /* Update the remote cached instance and return */
5032 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5033 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5034 &hw->remote_dcbx_config);
5035 goto exit;
5036 }
5037
5038 /* Convert/store the DCBX data from LLDPDU temporarily */
5039 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
5040 ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
5041 if (ret) {
5042 /* Error parsing the LLDPDU; bail out */
5043 dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
5044 goto exit;
5045 }
5046
5047 /* No change detected in DCBX configs */
5048 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
5049 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5050 goto exit;
5051 }
5052
5053 need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
5054
5055 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
5056
5057 /* Overwrite the new configuration */
5058 *dcbx_cfg = tmp_dcbx_cfg;
5059
5060 if (!need_reconfig)
5061 goto exit;
5062
5063 /* Enable DCB tagging only when more than one TC */
5064 if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
5065 pf->flags |= I40E_FLAG_DCB_ENABLED;
5066 else
5067 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5068
5069 /* Reconfiguration needed; quiesce all VSIs */
5070 i40e_pf_quiesce_all_vsi(pf);
5071
5072 /* Changes in configuration update VEB/VSI */
5073 i40e_dcb_reconfigure(pf);
5074
5075 i40e_pf_unquiesce_all_vsi(pf);
5076 exit:
5077 return ret;
5078 }
5079 #endif /* CONFIG_I40E_DCB */
5080
5081 /**
5082 * i40e_do_reset_safe - Protected reset path for userland calls.
5083 * @pf: board private structure
5084 * @reset_flags: which reset is requested
5085 *
5086 **/
5087 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5088 {
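/* hold the rtnl lock so the reset cannot race with netdev
 * open/stop callbacks triggered from user space
 */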
5089 rtnl_lock();
5090 i40e_do_reset(pf, reset_flags);
5091 rtnl_unlock();
5092 }
5093
5094 /**
5095 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5096 * @pf: board private structure
5097 * @e: event info posted on ARQ
5098 *
5099 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5100 * and VF queues
5101 **/
5102 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5103 struct i40e_arq_event_info *e)
5104 {
5105 struct i40e_aqc_lan_overflow *data =
5106 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5107 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5108 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5109 struct i40e_hw *hw = &pf->hw;
5110 struct i40e_vf *vf;
5111 u16 vf_id;
5112
5113 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5114 queue, qtx_ctl);
5115
5116 /* Queue belongs to VF, find the VF and issue VF reset */
5117 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5118 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5119 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5120 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
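/* QTX_CTL reports an absolute VF index; convert it to this
 * PF's zero-based VF array index before looking up the VF
 */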
5121 vf_id -= hw->func_caps.vf_base_id;
5122 vf = &pf->vf[vf_id];
5123 i40e_vc_notify_vf_reset(vf);
5124 /* Allow VF to process pending reset notification */
5125 msleep(20);
5126 i40e_reset_vf(vf, false);
5127 }
5128 }
5129
5130 /**
5131 * i40e_service_event_complete - Finish up the service event
5132 * @pf: board private structure
5133 **/
5134 static void i40e_service_event_complete(struct i40e_pf *pf)
5135 {
5136 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5137
5138 /* flush memory to make sure state is correct before next watchdog */
5139 smp_mb__before_atomic();
5140 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5141 }
5142
5143 /**
5144 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5145 * @pf: board private structure
5146 **/
5147 int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5148 {
5149 int val, fcnt_prog;
5150
5151 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5152 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5153 return fcnt_prog;
5154 }
5155
5156 /**
5157 * i40e_get_current_fd_count - Get the count of total FD filters programmed
5158 * @pf: board private structure
5159 **/
5160 int i40e_get_current_fd_count(struct i40e_pf *pf)
5161 {
5162 int val, fcnt_prog;
5163 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
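/* FDSTAT carries both the guaranteed and best-effort filter
 * counts; add the two fields for the total programmed
 */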
5164 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5165 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5166 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5167 return fcnt_prog;
5168 }
5169
5170 /**
5171 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5172 * @pf: board private structure
5173 **/
5174 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5175 {
5176 u32 fcnt_prog, fcnt_avail;
5177
5178 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5179 return;
5180
5181 /* Check if FD SB or ATR was auto-disabled and if there is enough room
5182 * to re-enable
5183 */
5184 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
5185 fcnt_avail = pf->fdir_pf_filter_count;
5186 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5187 (pf->fd_add_err == 0) ||
5188 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5189 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5190 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5191 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5192 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5193 }
5194 }
5195 /* Wait for some more space to be available to turn on ATR */
5196 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5197 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5198 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5199 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5200 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5201 }
5202 }
5203 }
5204
5205 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5206 /**
5207 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5208 * @pf: board private structure
5209 **/
5210 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5211 {
5212 int flush_wait_retry = 50;
5213 int reg;
5214
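/* rate-limit table flushes to once per I40E_MIN_FD_FLUSH_INTERVAL seconds */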
5215 if (time_after(jiffies, pf->fd_flush_timestamp +
5216 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5217 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5218 pf->fd_flush_timestamp = jiffies;
5219 pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
5220 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5221 /* flush all filters */
5222 wr32(&pf->hw, I40E_PFQF_CTL_1,
5223 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5224 i40e_flush(&pf->hw);
5225 pf->fd_flush_cnt++;
5226 pf->fd_add_err = 0;
5227 do {
5228 /* Check FD flush status every 5-6msec */
5229 usleep_range(5000, 6000);
5230 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5231 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5232 break;
5233 } while (flush_wait_retry--);
5234 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5235 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5236 } else {
5237 /* replay sideband filters */
5238 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5239
5240 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5241 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5242 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5243 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5244 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5245 }
5246 }
5247 }
5248
5249 /**
5250 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5251 * @pf: board private structure
5252 **/
5253 int i40e_get_current_atr_cnt(struct i40e_pf *pf)
5254 {
5255 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5256 }
5257
5258 /* We can see up to 256 filter programming descriptors in transit if the
5259 * filters are being applied really fast, before we see the first filter
5260 * miss error on Rx queue 0. Accumulating enough error messages before
5261 * reacting makes sure we don't trigger a flush too often.
5262 */
5263 #define I40E_MAX_FD_PROGRAM_ERROR 256
5264
5265 /**
5266 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5267 * @pf: board private structure
5268 **/
5269 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5270 {
5271
5272 /* if interface is down do nothing */
5273 if (test_bit(__I40E_DOWN, &pf->state))
5274 return;
5275
5276 if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
5277 (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
5278 (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
5279 i40e_fdir_flush_and_replay(pf);
5280
5281 i40e_fdir_check_and_reenable(pf);
5282
5283 }
5284
5285 /**
5286 * i40e_vsi_link_event - notify VSI of a link event
5287 * @vsi: vsi to be notified
5288 * @link_up: link up or down
5289 **/
5290 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5291 {
5292 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5293 return;
5294
5295 switch (vsi->type) {
5296 case I40E_VSI_MAIN:
5297 #ifdef I40E_FCOE
5298 case I40E_VSI_FCOE:
5299 #endif
5300 if (!vsi->netdev || !vsi->netdev_registered)
5301 break;
5302
5303 if (link_up) {
5304 netif_carrier_on(vsi->netdev);
5305 netif_tx_wake_all_queues(vsi->netdev);
5306 } else {
5307 netif_carrier_off(vsi->netdev);
5308 netif_tx_stop_all_queues(vsi->netdev);
5309 }
5310 break;
5311
5312 case I40E_VSI_SRIOV:
5313 break;
5314
5315 case I40E_VSI_VMDQ2:
5316 case I40E_VSI_CTRL:
5317 case I40E_VSI_MIRROR:
5318 default:
5319 /* there is no notification for other VSIs */
5320 break;
5321 }
5322 }
5323
5324 /**
5325 * i40e_veb_link_event - notify elements on the veb of a link event
5326 * @veb: veb to be notified
5327 * @link_up: link up or down
5328 **/
5329 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5330 {
5331 struct i40e_pf *pf;
5332 int i;
5333
5334 if (!veb || !veb->pf)
5335 return;
5336 pf = veb->pf;
5337
5338 /* depth first... */
5339 for (i = 0; i < I40E_MAX_VEB; i++)
5340 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5341 i40e_veb_link_event(pf->veb[i], link_up);
5342
5343 /* ... now the local VSIs */
5344 for (i = 0; i < pf->num_alloc_vsi; i++)
5345 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5346 i40e_vsi_link_event(pf->vsi[i], link_up);
5347 }
5348
5349 /**
5350 * i40e_link_event - Update netif_carrier status
5351 * @pf: board private structure
5352 **/
5353 static void i40e_link_event(struct i40e_pf *pf)
5354 {
5355 bool new_link, old_link;
5356
5357 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
5358 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5359
5360 if (new_link == old_link)
5361 return;
5362 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
5363 i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
5364
5365 /* Notify the base of the switch tree connected to
5366 * the link. Floating VEBs are not notified.
5367 */
5368 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5369 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5370 else
5371 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
5372
5373 if (pf->vf)
5374 i40e_vc_notify_link_state(pf);
5375
5376 if (pf->flags & I40E_FLAG_PTP)
5377 i40e_ptp_set_increment(pf);
5378 }
5379
5380 /**
5381 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5382 * @pf: board private structure
5383 *
5384 * Set the per-queue flags to request a check for stuck queues in the irq
5385 * clean functions, then force interrupts to be sure the irq clean is called.
5386 **/
5387 static void i40e_check_hang_subtask(struct i40e_pf *pf)
5388 {
5389 int i, v;
5390
5391 /* If we're down or resetting, just bail */
5392 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
5393 return;
5394
5395 /* for each VSI/netdev
5396 * for each Tx queue
5397 * set the check flag
5398 * for each q_vector
5399 * force an interrupt
5400 */
5401 for (v = 0; v < pf->num_alloc_vsi; v++) {
5402 struct i40e_vsi *vsi = pf->vsi[v];
5403 int armed = 0;
5404
5405 if (!pf->vsi[v] ||
5406 test_bit(__I40E_DOWN, &vsi->state) ||
5407 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5408 continue;
5409
5410 for (i = 0; i < vsi->num_queue_pairs; i++) {
5411 set_check_for_tx_hang(vsi->tx_rings[i]);
5412 if (test_bit(__I40E_HANG_CHECK_ARMED,
5413 &vsi->tx_rings[i]->state))
5414 armed++;
5415 }
5416
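/* if any ring armed its hang check, fire a software interrupt so
 * the irq clean routines run and evaluate the queues
 */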
5417 if (armed) {
5418 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5419 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5420 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5421 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
5422 } else {
5423 u16 vec = vsi->base_vector - 1;
5424 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5425 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
5426 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5427 wr32(&vsi->back->hw,
5428 I40E_PFINT_DYN_CTLN(vec), val);
5429 }
5430 i40e_flush(&vsi->back->hw);
5431 }
5432 }
5433 }
5434
5435 /**
5436 * i40e_watchdog_subtask - Check and bring link up
5437 * @pf: board private structure
5438 **/
5439 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5440 {
5441 int i;
5442
5443 /* if interface is down do nothing */
5444 if (test_bit(__I40E_DOWN, &pf->state) ||
5445 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5446 return;
5447
5448 /* Update the stats for active netdevs so the network stack
5449 * can look at updated numbers whenever it cares to
5450 */
5451 for (i = 0; i < pf->num_alloc_vsi; i++)
5452 if (pf->vsi[i] && pf->vsi[i]->netdev)
5453 i40e_update_stats(pf->vsi[i]);
5454
5455 /* Update the stats for the active switching components */
5456 for (i = 0; i < I40E_MAX_VEB; i++)
5457 if (pf->veb[i])
5458 i40e_update_veb_stats(pf->veb[i]);
5459
5460 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5461 }
5462
5463 /**
5464 * i40e_reset_subtask - Set up for resetting the device and driver
5465 * @pf: board private structure
5466 **/
5467 static void i40e_reset_subtask(struct i40e_pf *pf)
5468 {
5469 u32 reset_flags = 0;
5470
5471 rtnl_lock();
5472 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5473 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5474 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5475 }
5476 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5477 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5478 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5479 }
5480 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5481 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5482 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5483 }
5484 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5485 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5486 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5487 }
5488 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5489 reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5490 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5491 }
5492
5493 /* If there's a recovery already waiting, it takes
5494 * precedence over starting a new reset sequence.
5495 */
5496 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5497 i40e_handle_reset_warning(pf);
5498 goto unlock;
5499 }
5500
5501 /* If we're already down or resetting, just bail */
5502 if (reset_flags &&
5503 !test_bit(__I40E_DOWN, &pf->state) &&
5504 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5505 i40e_do_reset(pf, reset_flags);
5506
5507 unlock:
5508 rtnl_unlock();
5509 }
5510
5511 /**
5512 * i40e_handle_link_event - Handle link event
5513 * @pf: board private structure
5514 * @e: event info posted on ARQ
5515 **/
5516 static void i40e_handle_link_event(struct i40e_pf *pf,
5517 struct i40e_arq_event_info *e)
5518 {
5519 struct i40e_hw *hw = &pf->hw;
5520 struct i40e_aqc_get_link_status *status =
5521 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5522 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
5523
5524 /* save off old link status information */
5525 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
5526 sizeof(pf->hw.phy.link_info_old));
5527
5528 /* check for unqualified module, if link is down */
5529 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5530 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5531 (!(status->link_info & I40E_AQ_LINK_UP)))
5532 dev_err(&pf->pdev->dev,
5533 "The driver failed to link because an unqualified module was detected.\n");
5534
5535 /* update link status */
5536 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
5537 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
5538 hw_link_info->link_info = status->link_info;
5539 hw_link_info->an_info = status->an_info;
5540 hw_link_info->ext_info = status->ext_info;
5541 hw_link_info->lse_enable =
5542 le16_to_cpu(status->command_flags) &
5543 I40E_AQ_LSE_ENABLE;
5544
5545 /* process the event */
5546 i40e_link_event(pf);
5547
5548 /* Do a new status request to re-enable LSE reporting
5549 * and load new status information into the hw struct,
5550 * then see if the status changed while processing the
5551 * initial event.
5552 */
5553 i40e_update_link_info(&pf->hw, true);
5554 i40e_link_event(pf);
5555 }
5556
5557 /**
5558 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5559 * @pf: board private structure
5560 **/
5561 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5562 {
5563 struct i40e_arq_event_info event;
5564 struct i40e_hw *hw = &pf->hw;
5565 u16 pending, i = 0;
5566 i40e_status ret;
5567 u16 opcode;
5568 u32 oldval;
5569 u32 val;
5570
5571 /* Do not run clean AQ when PF reset fails */
5572 if (test_bit(__I40E_RESET_FAILED, &pf->state))
5573 return;
5574
5575 /* check for error indications */
5576 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5577 oldval = val;
5578 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5579 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5580 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5581 }
5582 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5583 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5584 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5585 }
5586 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5587 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5588 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5589 }
5590 if (oldval != val)
5591 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5592
5593 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5594 oldval = val;
5595 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5596 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5597 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5598 }
5599 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5600 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5601 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5602 }
5603 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5604 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5605 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5606 }
5607 if (oldval != val)
5608 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5609
5610 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
5611 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
5612 if (!event.msg_buf)
5613 return;
5614
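/* Drain the ARQ: pull off up to adminq_work_limit events per pass
 * and dispatch each one by its opcode.
 */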
5615 do {
5616 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
5617 ret = i40e_clean_arq_element(hw, &event, &pending);
5618 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
5619 break;
5620 else if (ret) {
5621 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5622 break;
5623 }
5624
5625 opcode = le16_to_cpu(event.desc.opcode);
5626 switch (opcode) {
5627
5628 case i40e_aqc_opc_get_link_status:
5629 i40e_handle_link_event(pf, &event);
5630 break;
5631 case i40e_aqc_opc_send_msg_to_pf:
5632 ret = i40e_vc_process_vf_msg(pf,
5633 le16_to_cpu(event.desc.retval),
5634 le32_to_cpu(event.desc.cookie_high),
5635 le32_to_cpu(event.desc.cookie_low),
5636 event.msg_buf,
5637 event.msg_size);
5638 break;
5639 case i40e_aqc_opc_lldp_update_mib:
5640 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
5641 #ifdef CONFIG_I40E_DCB
5642 rtnl_lock();
5643 ret = i40e_handle_lldp_event(pf, &event);
5644 rtnl_unlock();
5645 #endif /* CONFIG_I40E_DCB */
5646 break;
5647 case i40e_aqc_opc_event_lan_overflow:
5648 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
5649 i40e_handle_lan_overflow_event(pf, &event);
5650 break;
5651 case i40e_aqc_opc_send_msg_to_peer:
5652 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5653 break;
5654 default:
5655 dev_info(&pf->pdev->dev,
5656 "ARQ Error: Unknown event 0x%04x received\n",
5657 opcode);
5658 break;
5659 }
5660 } while (pending && (i++ < pf->adminq_work_limit));
5661
5662 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5663 /* re-enable Admin queue interrupt cause */
5664 val = rd32(hw, I40E_PFINT_ICR0_ENA);
5665 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5666 wr32(hw, I40E_PFINT_ICR0_ENA, val);
5667 i40e_flush(hw);
5668
5669 kfree(event.msg_buf);
5670 }
5671
5672 /**
5673 * i40e_verify_eeprom - make sure eeprom is good to use
5674 * @pf: board private structure
5675 **/
5676 static void i40e_verify_eeprom(struct i40e_pf *pf)
5677 {
5678 int err;
5679
5680 err = i40e_diag_eeprom_test(&pf->hw);
5681 if (err) {
5682 /* retry in case of garbage read */
5683 err = i40e_diag_eeprom_test(&pf->hw);
5684 if (err) {
5685 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5686 err);
5687 set_bit(__I40E_BAD_EEPROM, &pf->state);
5688 }
5689 }
5690
5691 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5692 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5693 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5694 }
5695 }
5696
5697 /**
5698 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
5699 * @veb: pointer to the VEB instance
5700 *
5701 * This is a recursive function that first builds the attached VSIs, then
5702 * recurses to build the next layer of VEBs. We track the connections
5703 * through our own index numbers because the SEIDs from the HW could
5704 * change across the reset.
5705 **/
5706 static int i40e_reconstitute_veb(struct i40e_veb *veb)
5707 {
5708 struct i40e_vsi *ctl_vsi = NULL;
5709 struct i40e_pf *pf = veb->pf;
5710 int v, veb_idx;
5711 int ret;
5712
5713 /* build VSI that owns this VEB, temporarily attached to base VEB */
5714 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
5715 if (pf->vsi[v] &&
5716 pf->vsi[v]->veb_idx == veb->idx &&
5717 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
5718 ctl_vsi = pf->vsi[v];
5719 break;
5720 }
5721 }
5722 if (!ctl_vsi) {
5723 dev_info(&pf->pdev->dev,
5724 "missing owner VSI for veb_idx %d\n", veb->idx);
5725 ret = -ENOENT;
5726 goto end_reconstitute;
5727 }
5728 if (ctl_vsi != pf->vsi[pf->lan_vsi])
5729 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5730 ret = i40e_add_vsi(ctl_vsi);
5731 if (ret) {
5732 dev_info(&pf->pdev->dev,
5733 "rebuild of owner VSI failed: %d\n", ret);
5734 goto end_reconstitute;
5735 }
5736 i40e_vsi_reset_stats(ctl_vsi);
5737
5738 /* create the VEB in the switch and move the VSI onto the VEB */
5739 ret = i40e_add_veb(veb, ctl_vsi);
5740 if (ret)
5741 goto end_reconstitute;
5742
5743 /* create the remaining VSIs attached to this VEB */
5744 for (v = 0; v < pf->num_alloc_vsi; v++) {
5745 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5746 continue;
5747
5748 if (pf->vsi[v]->veb_idx == veb->idx) {
5749 struct i40e_vsi *vsi = pf->vsi[v];
5750 vsi->uplink_seid = veb->seid;
5751 ret = i40e_add_vsi(vsi);
5752 if (ret) {
5753 dev_info(&pf->pdev->dev,
5754 "rebuild of vsi_idx %d failed: %d\n",
5755 v, ret);
5756 goto end_reconstitute;
5757 }
5758 i40e_vsi_reset_stats(vsi);
5759 }
5760 }
5761
5762 /* create any VEBs attached to this VEB - RECURSION */
5763 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
5764 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
5765 pf->veb[veb_idx]->uplink_seid = veb->seid;
5766 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
5767 if (ret)
5768 break;
5769 }
5770 }
5771
5772 end_reconstitute:
5773 return ret;
5774 }
5775
5776 /**
5777 * i40e_get_capabilities - get info about the HW
5778 * @pf: the PF struct
5779 **/
5780 static int i40e_get_capabilities(struct i40e_pf *pf)
5781 {
5782 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
5783 u16 data_size;
5784 int buf_len;
5785 int err;
5786
5787 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
5788 do {
5789 cap_buf = kzalloc(buf_len, GFP_KERNEL);
5790 if (!cap_buf)
5791 return -ENOMEM;
5792
5793 /* this loads the data into the hw struct for us */
5794 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
5795 &data_size,
5796 i40e_aqc_opc_list_func_capabilities,
5797 NULL);
5798 /* data loaded, buffer no longer needed */
5799 kfree(cap_buf);
5800
5801 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
5802 /* retry with a larger buffer */
5803 buf_len = data_size;
5804 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
5805 dev_info(&pf->pdev->dev,
5806 "capability discovery failed: aq=%d\n",
5807 pf->hw.aq.asq_last_status);
5808 return -ENODEV;
5809 }
5810 } while (err);
5811
5812 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5813 (pf->hw.aq.fw_maj_ver < 2)) {
5814 pf->hw.func_caps.num_msix_vectors++;
5815 pf->hw.func_caps.num_msix_vectors_vf++;
5816 }
5817
5818 if (pf->hw.debug_mask & I40E_DEBUG_USER)
5819 dev_info(&pf->pdev->dev,
5820 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
5821 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
5822 pf->hw.func_caps.num_msix_vectors,
5823 pf->hw.func_caps.num_msix_vectors_vf,
5824 pf->hw.func_caps.fd_filters_guaranteed,
5825 pf->hw.func_caps.fd_filters_best_effort,
5826 pf->hw.func_caps.num_tx_qp,
5827 pf->hw.func_caps.num_vsis);
5828
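/* Expected VSI count: one base VSI (presumably the main LAN VSI), one
 * for FCoE when supported, plus one per VF; used below to make sure a
 * rev-0 part reports at least this many VSIs.
 */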
5829 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
5830 + pf->hw.func_caps.num_vfs)
5831 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
5832 dev_info(&pf->pdev->dev,
5833 "got num_vsis %d, setting num_vsis to %d\n",
5834 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
5835 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
5836 }
5837
5838 return 0;
5839 }
5840
5841 static int i40e_vsi_clear(struct i40e_vsi *vsi);
5842
5843 /**
5844 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
5845 * @pf: board private structure
5846 **/
5847 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5848 {
5849 struct i40e_vsi *vsi;
5850 int i;
5851
5852 /* quick workaround for an NVM issue that leaves a critical register
5853 * uninitialized
5854 */
5855 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
5856 static const u32 hkey[] = {
5857 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
5858 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
5859 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
5860 0x95b3a76d};
5861
5862 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
5863 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
5864 }
5865
5866 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
5867 return;
5868
5869 /* find existing VSI and see if it needs configuring */
5870 vsi = NULL;
5871 for (i = 0; i < pf->num_alloc_vsi; i++) {
5872 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5873 vsi = pf->vsi[i];
5874 break;
5875 }
5876 }
5877
5878 /* create a new VSI if none exists */
5879 if (!vsi) {
5880 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
5881 pf->vsi[pf->lan_vsi]->seid, 0);
5882 if (!vsi) {
5883 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
5884 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5885 return;
5886 }
5887 }
5888
5889 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
5890 }
5891
5892 /**
5893 * i40e_fdir_teardown - release the Flow Director resources
5894 * @pf: board private structure
5895 **/
5896 static void i40e_fdir_teardown(struct i40e_pf *pf)
5897 {
5898 int i;
5899
5900 i40e_fdir_filter_exit(pf);
5901 for (i = 0; i < pf->num_alloc_vsi; i++) {
5902 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5903 i40e_vsi_release(pf->vsi[i]);
5904 break;
5905 }
5906 }
5907 }
5908
5909 /**
5910 * i40e_prep_for_reset - prep for the core to reset
5911 * @pf: board private structure
5912 *
5913 * Close up the VFs and other things in prep for pf Reset.
5914 **/
5915 static void i40e_prep_for_reset(struct i40e_pf *pf)
5916 {
5917 struct i40e_hw *hw = &pf->hw;
5918 i40e_status ret = 0;
5919 u32 v;
5920
5921 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
5922 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
5923 return;
5924
5925 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5926
5927 /* quiesce the VSIs and their queues that are not already DOWN */
5928 i40e_pf_quiesce_all_vsi(pf);
5929
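/* Zero out the stored SEIDs; they may change across the reset and
 * are re-learned when the switch is reconstituted afterwards.
 */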
5930 for (v = 0; v < pf->num_alloc_vsi; v++) {
5931 if (pf->vsi[v])
5932 pf->vsi[v]->seid = 0;
5933 }
5934
5935 i40e_shutdown_adminq(&pf->hw);
5936
5937 /* call shutdown HMC */
5938 if (hw->hmc.hmc_obj) {
5939 ret = i40e_shutdown_lan_hmc(hw);
5940 if (ret)
5941 dev_warn(&pf->pdev->dev,
5942 "shutdown_lan_hmc failed: %d\n", ret);
5943 }
5944 }
5945
5946 /**
5947 * i40e_send_version - update firmware with driver version
5948 * @pf: PF struct
5949 */
5950 static void i40e_send_version(struct i40e_pf *pf)
5951 {
5952 struct i40e_driver_version dv;
5953
5954 dv.major_version = DRV_VERSION_MAJOR;
5955 dv.minor_version = DRV_VERSION_MINOR;
5956 dv.build_version = DRV_VERSION_BUILD;
5957 dv.subbuild_version = 0;
5958 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
5959 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5960 }
5961
5962 /**
5963 * i40e_reset_and_rebuild - reset and rebuild using a saved config
5964 * @pf: board private structure
5965 * @reinit: true if the Main VSI needs to be re-initialized
5966 **/
5967 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5968 {
5969 struct i40e_hw *hw = &pf->hw;
5970 i40e_status ret;
5971 u32 v;
5972
5973 /* Now we wait for GRST to settle out.
5974 * We don't have to delete the VEBs or VSIs from the hw switch
5975 * because the reset will make them disappear.
5976 */
5977 ret = i40e_pf_reset(hw);
5978 if (ret) {
5979 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5980 set_bit(__I40E_RESET_FAILED, &pf->state);
5981 goto clear_recovery;
5982 }
5983 pf->pfr_count++;
5984
5985 if (test_bit(__I40E_DOWN, &pf->state))
5986 goto clear_recovery;
5987 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
5988
5989 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5990 ret = i40e_init_adminq(&pf->hw);
5991 if (ret) {
5992 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5993 goto clear_recovery;
5994 }
5995
5996 /* re-verify the eeprom if we just had an EMP reset */
5997 if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
5998 clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
5999 i40e_verify_eeprom(pf);
6000 }
6001
6002 i40e_clear_pxe_mode(hw);
6003 ret = i40e_get_capabilities(pf);
6004 if (ret) {
6005 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
6006 ret);
6007 goto end_core_reset;
6008 }
6009
6010 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6011 hw->func_caps.num_rx_qp,
6012 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6013 if (ret) {
6014 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6015 goto end_core_reset;
6016 }
6017 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6018 if (ret) {
6019 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6020 goto end_core_reset;
6021 }
6022
6023 #ifdef CONFIG_I40E_DCB
6024 ret = i40e_init_pf_dcb(pf);
6025 if (ret) {
6026 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
6027 goto end_core_reset;
6028 }
6029 #endif /* CONFIG_I40E_DCB */
6030 #ifdef I40E_FCOE
6031 ret = i40e_init_pf_fcoe(pf);
6032 if (ret)
6033 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6034
6035 #endif
6036 /* do basic switch setup */
6037 ret = i40e_setup_pf_switch(pf, reinit);
6038 if (ret)
6039 goto end_core_reset;
6040
6041 /* driver is only interested in link up/down and module qualification
6042 * reports from firmware
6043 */
6044 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6045 I40E_AQ_EVENT_LINK_UPDOWN |
6046 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6047 if (ret)
6048 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
6049
6050 /* Rebuild the VSIs and VEBs that existed before reset.
6051 * They are still in our local switch element arrays, so only
6052 * need to rebuild the switch model in the HW.
6053 *
6054 * If there were VEBs but the reconstitution failed, we'll try
6055 * to recover minimal use by getting the basic PF VSI working.
6056 */
6057 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6058 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6059 /* find the one VEB connected to the MAC, and find orphans */
6060 for (v = 0; v < I40E_MAX_VEB; v++) {
6061 if (!pf->veb[v])
6062 continue;
6063
6064 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6065 pf->veb[v]->uplink_seid == 0) {
6066 ret = i40e_reconstitute_veb(pf->veb[v]);
6067
6068 if (!ret)
6069 continue;
6070
6071 /* If Main VEB failed, we're in deep doodoo,
6072 * so give up rebuilding the switch and set up
6073 * for minimal rebuild of PF VSI.
6074 * If orphan failed, we'll report the error
6075 * but try to keep going.
6076 */
6077 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6078 dev_info(&pf->pdev->dev,
6079 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6080 ret);
6081 pf->vsi[pf->lan_vsi]->uplink_seid
6082 = pf->mac_seid;
6083 break;
6084 } else if (pf->veb[v]->uplink_seid == 0) {
6085 dev_info(&pf->pdev->dev,
6086 "rebuild of orphan VEB failed: %d\n",
6087 ret);
6088 }
6089 }
6090 }
6091 }
6092
6093 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6094 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6095 /* no VEB, so rebuild only the Main VSI */
6096 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6097 if (ret) {
6098 dev_info(&pf->pdev->dev,
6099 "rebuild of Main VSI failed: %d\n", ret);
6100 goto end_core_reset;
6101 }
6102 }
6103
6104 /* reinit the misc interrupt */
6105 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6106 ret = i40e_setup_misc_vector(pf);
6107
6108 /* restart the VSIs that were rebuilt and running before the reset */
6109 i40e_pf_unquiesce_all_vsi(pf);
6110
6111 if (pf->num_alloc_vfs) {
6112 for (v = 0; v < pf->num_alloc_vfs; v++)
6113 i40e_reset_vf(&pf->vf[v], true);
6114 }
6115
6116 /* tell the firmware that we're starting */
6117 i40e_send_version(pf);
6118
6119 end_core_reset:
6120 clear_bit(__I40E_RESET_FAILED, &pf->state);
6121 clear_recovery:
6122 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6123 }
6124
6125 /**
6126 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
6127 * @pf: board private structure
6128 *
6129 * Close up the VFs and other things in prep for a Core Reset,
6130 * then get ready to rebuild the world.
6131 **/
6132 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6133 {
6134 i40e_prep_for_reset(pf);
6135 i40e_reset_and_rebuild(pf, false);
6136 }
6137
6138 /**
6139 * i40e_handle_mdd_event
6140 * @pf: pointer to the pf structure
6141 *
6142 * Called from the MDD irq handler to identify potentially malicious VFs
6143 **/
6144 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6145 {
6146 struct i40e_hw *hw = &pf->hw;
6147 bool mdd_detected = false;
6148 bool pf_mdd_detected = false;
6149 struct i40e_vf *vf;
6150 u32 reg;
6151 int i;
6152
6153 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6154 return;
6155
6156 /* find what triggered the MDD event */
6157 reg = rd32(hw, I40E_GL_MDET_TX);
6158 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6159 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6160 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6161 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6162 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6163 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6164 I40E_GL_MDET_TX_EVENT_SHIFT;
6165 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6166 I40E_GL_MDET_TX_QUEUE_SHIFT;
6167 if (netif_msg_tx_err(pf))
6168 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
6169 event, queue, pf_num, vf_num);
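/* writing all ones here is assumed to clear the latched event so a
 * subsequent MDD occurrence can be detected
 */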
6170 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6171 mdd_detected = true;
6172 }
6173 reg = rd32(hw, I40E_GL_MDET_RX);
6174 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6175 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6176 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6177 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6178 I40E_GL_MDET_RX_EVENT_SHIFT;
6179 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6180 I40E_GL_MDET_RX_QUEUE_SHIFT;
6181 if (netif_msg_rx_err(pf))
6182 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6183 event, queue, func);
6184 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6185 mdd_detected = true;
6186 }
6187
6188 if (mdd_detected) {
6189 reg = rd32(hw, I40E_PF_MDET_TX);
6190 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6191 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6192 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6193 pf_mdd_detected = true;
6194 }
6195 reg = rd32(hw, I40E_PF_MDET_RX);
6196 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6197 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6198 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6199 pf_mdd_detected = true;
6200 }
6201 /* Queue belongs to the PF, initiate a reset */
6202 if (pf_mdd_detected) {
6203 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6204 i40e_service_event_schedule(pf);
6205 }
6206 }
6207
6208 /* see if one of the VFs needs its hand slapped */
6209 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6210 vf = &(pf->vf[i]);
6211 reg = rd32(hw, I40E_VP_MDET_TX(i));
6212 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6213 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6214 vf->num_mdd_events++;
6215 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6216 i);
6217 }
6218
6219 reg = rd32(hw, I40E_VP_MDET_RX(i));
6220 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6221 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6222 vf->num_mdd_events++;
6223 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6224 i);
6225 }
6226
6227 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6228 dev_info(&pf->pdev->dev,
6229 "Too many MDD events on VF %d, disabled\n", i);
6230 dev_info(&pf->pdev->dev,
6231 "Use PF Control I/F to re-enable the VF\n");
6232 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6233 }
6234 }
6235
6236 /* re-enable mdd interrupt cause */
6237 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6238 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6239 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6240 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6241 i40e_flush(hw);
6242 }
6243
6244 #ifdef CONFIG_I40E_VXLAN
6245 /**
6246 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6247 * @pf: board private structure
6248 **/
6249 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6250 {
6251 struct i40e_hw *hw = &pf->hw;
6252 i40e_status ret;
6253 u8 filter_index;
6254 __be16 port;
6255 int i;
6256
6257 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6258 return;
6259
6260 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6261
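/* Walk the pending bitmap: a non-zero port entry means the tunnel
 * should be added, while a zeroed entry was marked for deletion by
 * i40e_del_vxlan_port().
 */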
6262 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6263 if (pf->pending_vxlan_bitmap & (1 << i)) {
6264 pf->pending_vxlan_bitmap &= ~(1 << i);
6265 port = pf->vxlan_ports[i];
6266 ret = port ?
6267 i40e_aq_add_udp_tunnel(hw, ntohs(port),
6268 I40E_AQC_TUNNEL_TYPE_VXLAN,
6269 &filter_index, NULL)
6270 : i40e_aq_del_udp_tunnel(hw, i, NULL);
6271
6272 if (ret) {
6273 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
6274 port ? "adding" : "deleting",
6275 ntohs(port), i);
6276
6277 pf->vxlan_ports[i] = 0;
6278 } else {
6279 dev_info(&pf->pdev->dev, "%s port %d with AQ command, index %d\n",
6280 port ? "Added" : "Deleted",
6281 ntohs(port), port ? i : filter_index);
6282 }
6283 }
6284 }
6285 }
6286
6287 #endif
6288 /**
6289 * i40e_service_task - Run the driver's async subtasks
6290 * @work: pointer to work_struct containing our data
6291 **/
6292 static void i40e_service_task(struct work_struct *work)
6293 {
6294 struct i40e_pf *pf = container_of(work,
6295 struct i40e_pf,
6296 service_task);
6297 unsigned long start_time = jiffies;
6298
6299 /* don't bother with service tasks if a reset is in progress */
6300 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6301 i40e_service_event_complete(pf);
6302 return;
6303 }
6304
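/* run the subtasks in turn; most of them check their own pending
 * state and return immediately when there is nothing to do
 */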
6305 i40e_reset_subtask(pf);
6306 i40e_handle_mdd_event(pf);
6307 i40e_vc_process_vflr_event(pf);
6308 i40e_watchdog_subtask(pf);
6309 i40e_fdir_reinit_subtask(pf);
6310 i40e_check_hang_subtask(pf);
6311 i40e_sync_filters_subtask(pf);
6312 #ifdef CONFIG_I40E_VXLAN
6313 i40e_sync_vxlan_filters_subtask(pf);
6314 #endif
6315 i40e_clean_adminq_subtask(pf);
6316
6317 i40e_service_event_complete(pf);
6318
6319 /* If the tasks have taken longer than one timer cycle or there
6320 * is more work to be done, reschedule the service task now
6321 * rather than wait for the timer to tick again.
6322 */
6323 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6324 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6325 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6326 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6327 i40e_service_event_schedule(pf);
6328 }
6329
6330 /**
6331 * i40e_service_timer - timer callback
6332 * @data: pointer to PF struct
6333 **/
6334 static void i40e_service_timer(unsigned long data)
6335 {
6336 struct i40e_pf *pf = (struct i40e_pf *)data;
6337
6338 mod_timer(&pf->service_timer,
6339 round_jiffies(jiffies + pf->service_timer_period));
6340 i40e_service_event_schedule(pf);
6341 }
6342
6343 /**
6344 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6345 * @vsi: the VSI being configured
6346 **/
6347 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6348 {
6349 struct i40e_pf *pf = vsi->back;
6350
6351 switch (vsi->type) {
6352 case I40E_VSI_MAIN:
6353 vsi->alloc_queue_pairs = pf->num_lan_qps;
6354 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6355 I40E_REQ_DESCRIPTOR_MULTIPLE);
6356 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6357 vsi->num_q_vectors = pf->num_lan_msix;
6358 else
6359 vsi->num_q_vectors = 1;
6360
6361 break;
6362
6363 case I40E_VSI_FDIR:
6364 vsi->alloc_queue_pairs = 1;
6365 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6366 I40E_REQ_DESCRIPTOR_MULTIPLE);
6367 vsi->num_q_vectors = 1;
6368 break;
6369
6370 case I40E_VSI_VMDQ2:
6371 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6372 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6373 I40E_REQ_DESCRIPTOR_MULTIPLE);
6374 vsi->num_q_vectors = pf->num_vmdq_msix;
6375 break;
6376
6377 case I40E_VSI_SRIOV:
6378 vsi->alloc_queue_pairs = pf->num_vf_qps;
6379 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6380 I40E_REQ_DESCRIPTOR_MULTIPLE);
6381 break;
6382
6383 #ifdef I40E_FCOE
6384 case I40E_VSI_FCOE:
6385 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6386 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6387 I40E_REQ_DESCRIPTOR_MULTIPLE);
6388 vsi->num_q_vectors = pf->num_fcoe_msix;
6389 break;
6390
6391 #endif /* I40E_FCOE */
6392 default:
6393 WARN_ON(1);
6394 return -ENODATA;
6395 }
6396
6397 return 0;
6398 }
6399
6400 /**
6401 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
6402 * @vsi: pointer to the VSI being configured
6403 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6404 *
6405 * On error: returns error code (negative)
6406 * On success: returns 0
6407 **/
6408 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6409 {
6410 int size;
6411 int ret = 0;
6412
6413 /* allocate memory for both Tx and Rx ring pointers */
6414 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6415 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6416 if (!vsi->tx_rings)
6417 return -ENOMEM;
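/* the Rx ring pointers occupy the second half of the single allocation above */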
6418 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6419
6420 if (alloc_qvectors) {
6421 /* allocate memory for q_vector pointers */
6422 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6423 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6424 if (!vsi->q_vectors) {
6425 ret = -ENOMEM;
6426 goto err_vectors;
6427 }
6428 }
6429 return ret;
6430
6431 err_vectors:
6432 kfree(vsi->tx_rings);
6433 return ret;
6434 }
6435
6436 /**
6437 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6438 * @pf: board private structure
6439 * @type: type of VSI
6440 *
6441 * On error: returns error code (negative)
6442 * On success: returns vsi index in PF (positive)
6443 **/
6444 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6445 {
6446 int ret = -ENODEV;
6447 struct i40e_vsi *vsi;
6448 int vsi_idx;
6449 int i;
6450
6451 /* Need to protect the allocation of the VSIs at the PF level */
6452 mutex_lock(&pf->switch_mutex);
6453
6454 /* VSI list may be fragmented if VSI creation/destruction has
6455 * been happening. We can afford to do a quick scan to look
6456 * for any free VSIs in the list.
6457 *
6458 * find next empty vsi slot, looping back around if necessary
6459 */
6460 i = pf->next_vsi;
6461 while (i < pf->num_alloc_vsi && pf->vsi[i])
6462 i++;
6463 if (i >= pf->num_alloc_vsi) {
6464 i = 0;
6465 while (i < pf->next_vsi && pf->vsi[i])
6466 i++;
6467 }
6468
6469 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6470 vsi_idx = i; /* Found one! */
6471 } else {
6472 ret = -ENODEV;
6473 goto unlock_pf; /* out of VSI slots! */
6474 }
6475 pf->next_vsi = ++i;
6476
6477 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6478 if (!vsi) {
6479 ret = -ENOMEM;
6480 goto unlock_pf;
6481 }
6482 vsi->type = type;
6483 vsi->back = pf;
6484 set_bit(__I40E_DOWN, &vsi->state);
6485 vsi->flags = 0;
6486 vsi->idx = vsi_idx;
6487 vsi->rx_itr_setting = pf->rx_itr_default;
6488 vsi->tx_itr_setting = pf->tx_itr_default;
6489 vsi->netdev_registered = false;
6490 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6491 INIT_LIST_HEAD(&vsi->mac_filter_list);
6492 vsi->irqs_ready = false;
6493
6494 ret = i40e_set_num_rings_in_vsi(vsi);
6495 if (ret)
6496 goto err_rings;
6497
6498 ret = i40e_vsi_alloc_arrays(vsi, true);
6499 if (ret)
6500 goto err_rings;
6501
6502 /* Setup default MSIX irq handler for VSI */
6503 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6504
6505 pf->vsi[vsi_idx] = vsi;
6506 ret = vsi_idx;
6507 goto unlock_pf;
6508
6509 err_rings:
6510 pf->next_vsi = i - 1;
6511 kfree(vsi);
6512 unlock_pf:
6513 mutex_unlock(&pf->switch_mutex);
6514 return ret;
6515 }
6516
6517 /**
6518 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6519 * @vsi: pointer to the VSI whose arrays are being freed
6520 * @free_qvectors: a bool to specify if q_vectors need to be freed.
6521 *
6522 * This function returns nothing; it simply frees the ring and
6523 * q_vector pointer containers.
6524 **/
6525 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6526 {
6527 /* free the ring and vector containers */
6528 if (free_qvectors) {
6529 kfree(vsi->q_vectors);
6530 vsi->q_vectors = NULL;
6531 }
6532 kfree(vsi->tx_rings);
6533 vsi->tx_rings = NULL;
6534 vsi->rx_rings = NULL;
6535 }
6536
6537 /**
6538 * i40e_vsi_clear - Deallocate the VSI provided
6539 * @vsi: the VSI being un-configured
6540 **/
6541 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6542 {
6543 struct i40e_pf *pf;
6544
6545 if (!vsi)
6546 return 0;
6547
6548 if (!vsi->back)
6549 goto free_vsi;
6550 pf = vsi->back;
6551
6552 mutex_lock(&pf->switch_mutex);
6553 if (!pf->vsi[vsi->idx]) {
6554 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
6555 vsi->idx, vsi->idx, vsi, vsi->type);
6556 goto unlock_vsi;
6557 }
6558
6559 if (pf->vsi[vsi->idx] != vsi) {
6560 dev_err(&pf->pdev->dev,
6561 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
6562 pf->vsi[vsi->idx]->idx,
6563 pf->vsi[vsi->idx],
6564 pf->vsi[vsi->idx]->type,
6565 vsi->idx, vsi, vsi->type);
6566 goto unlock_vsi;
6567 }
6568
6569 /* updates the pf for this cleared vsi */
6570 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6571 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
6572
6573 i40e_vsi_free_arrays(vsi, true);
6574
6575 pf->vsi[vsi->idx] = NULL;
6576 if (vsi->idx < pf->next_vsi)
6577 pf->next_vsi = vsi->idx;
6578
6579 unlock_vsi:
6580 mutex_unlock(&pf->switch_mutex);
6581 free_vsi:
6582 kfree(vsi);
6583
6584 return 0;
6585 }
6586
6587 /**
6588 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6589 * @vsi: the VSI being cleaned
6590 **/
6591 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6592 {
6593 int i;
6594
6595 if (vsi->tx_rings && vsi->tx_rings[0]) {
6596 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6597 kfree_rcu(vsi->tx_rings[i], rcu);
6598 vsi->tx_rings[i] = NULL;
6599 vsi->rx_rings[i] = NULL;
6600 }
6601 }
6602 }
6603
6604 /**
6605 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6606 * @vsi: the VSI being configured
6607 **/
6608 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6609 {
6610 struct i40e_ring *tx_ring, *rx_ring;
6611 struct i40e_pf *pf = vsi->back;
6612 int i;
6613
6614 /* Set basic values in the rings to be used later during open() */
6615 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6616 /* allocate space for both Tx and Rx in one shot */
6617 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6618 if (!tx_ring)
6619 goto err_out;
6620
6621 tx_ring->queue_index = i;
6622 tx_ring->reg_idx = vsi->base_queue + i;
6623 tx_ring->ring_active = false;
6624 tx_ring->vsi = vsi;
6625 tx_ring->netdev = vsi->netdev;
6626 tx_ring->dev = &pf->pdev->dev;
6627 tx_ring->count = vsi->num_desc;
6628 tx_ring->size = 0;
6629 tx_ring->dcb_tc = 0;
6630 vsi->tx_rings[i] = tx_ring;
6631
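/* the Rx ring is the second half of the paired ring allocation above */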
6632 rx_ring = &tx_ring[1];
6633 rx_ring->queue_index = i;
6634 rx_ring->reg_idx = vsi->base_queue + i;
6635 rx_ring->ring_active = false;
6636 rx_ring->vsi = vsi;
6637 rx_ring->netdev = vsi->netdev;
6638 rx_ring->dev = &pf->pdev->dev;
6639 rx_ring->count = vsi->num_desc;
6640 rx_ring->size = 0;
6641 rx_ring->dcb_tc = 0;
6642 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
6643 set_ring_16byte_desc_enabled(rx_ring);
6644 else
6645 clear_ring_16byte_desc_enabled(rx_ring);
6646 vsi->rx_rings[i] = rx_ring;
6647 }
6648
6649 return 0;
6650
6651 err_out:
6652 i40e_vsi_clear_rings(vsi);
6653 return -ENOMEM;
6654 }
6655
6656 /**
6657 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6658 * @pf: board private structure
6659 * @vectors: the number of MSI-X vectors to request
6660 *
6661 * Returns the number of vectors reserved, or error
6662 **/
6663 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6664 {
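/* request up to 'vectors' MSI-X entries, accepting as few as I40E_MIN_MSIX */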
6665 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6666 I40E_MIN_MSIX, vectors);
6667 if (vectors < 0) {
6668 dev_info(&pf->pdev->dev,
6669 "MSI-X vector reservation failed: %d\n", vectors);
6670 vectors = 0;
6671 }
6672
6673 return vectors;
6674 }
6675
6676 /**
6677 * i40e_init_msix - Setup the MSIX capability
6678 * @pf: board private structure
6679 *
6680 * Work with the OS to set up the MSIX vectors needed.
6681 *
6682 * Returns 0 on success, negative on failure
6683 **/
6684 static int i40e_init_msix(struct i40e_pf *pf)
6685 {
6686 i40e_status err = 0;
6687 struct i40e_hw *hw = &pf->hw;
6688 int v_budget, i;
6689 int vec;
6690
6691 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6692 return -ENODEV;
6693
6694 /* The number of vectors we'll request will be comprised of:
6695 * - Add 1 for "other" cause for Admin Queue events, etc.
6696 * - The number of LAN queue pairs
6697 * - Queues being used for RSS.
6698 * We don't need as many as max_rss_size vectors;
6699 * use rss_size instead in the calculation since that
6700 * is governed by the number of CPUs in the system.
6701 * - assumes symmetric Tx/Rx pairing
6702 * - The number of VMDq pairs
6703 #ifdef I40E_FCOE
6704 * - The number of FCOE qps.
6705 #endif
6706 * Once we count this up, try the request.
6707 *
6708 * If we can't get what we want, we'll simplify to nearly nothing
6709 * and try again. If that still fails, we punt.
6710 */
6711 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
6712 pf->num_vmdq_msix = pf->num_vmdq_qps;
6713 v_budget = 1 + pf->num_lan_msix;
6714 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
6715 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6716 v_budget++;
6717
6718 #ifdef I40E_FCOE
6719 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6720 pf->num_fcoe_msix = pf->num_fcoe_qps;
6721 v_budget += pf->num_fcoe_msix;
6722 }
6723
6724 #endif
6725 /* Scale down if necessary, and the rings will share vectors */
6726 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
6727
6728 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6729 GFP_KERNEL);
6730 if (!pf->msix_entries)
6731 return -ENOMEM;
6732
6733 for (i = 0; i < v_budget; i++)
6734 pf->msix_entries[i].entry = i;
6735 vec = i40e_reserve_msix_vectors(pf, v_budget);
6736
6737 if (vec != v_budget) {
6738 /* If we have limited resources, we will start with no vectors
6739 * for the special features and then allocate vectors to some
6740 * of these features based on the policy and at the end disable
6741 * the features that did not get any vectors.
6742 */
6743 #ifdef I40E_FCOE
6744 pf->num_fcoe_qps = 0;
6745 pf->num_fcoe_msix = 0;
6746 #endif
6747 pf->num_vmdq_msix = 0;
6748 }
6749
6750 if (vec < I40E_MIN_MSIX) {
6751 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6752 kfree(pf->msix_entries);
6753 pf->msix_entries = NULL;
6754 return -ENODEV;
6755
6756 } else if (vec == I40E_MIN_MSIX) {
6757 /* Adjust for minimal MSIX use */
6758 pf->num_vmdq_vsis = 0;
6759 pf->num_vmdq_qps = 0;
6760 pf->num_lan_qps = 1;
6761 pf->num_lan_msix = 1;
6762
6763 } else if (vec != v_budget) {
6764 /* reserve the misc vector */
6765 vec--;
6766
6767 /* Scale vector usage down */
6768 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
6769 pf->num_vmdq_vsis = 1;
6770
6771 /* partition out the remaining vectors */
6772 switch (vec) {
6773 case 2:
6774 pf->num_lan_msix = 1;
6775 break;
6776 case 3:
6777 #ifdef I40E_FCOE
6778 /* give one vector to FCoE */
6779 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6780 pf->num_lan_msix = 1;
6781 pf->num_fcoe_msix = 1;
6782 }
6783 #else
6784 pf->num_lan_msix = 2;
6785 #endif
6786 break;
6787 default:
6788 #ifdef I40E_FCOE
6789 /* give one vector to FCoE */
6790 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6791 pf->num_fcoe_msix = 1;
6792 vec--;
6793 }
6794 #endif
6795 pf->num_lan_msix = min_t(int, (vec / 2),
6796 pf->num_lan_qps);
6797 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
6798 I40E_DEFAULT_NUM_VMDQ_VSI);
6799 break;
6800 }
6801 }
6802
6803 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
6804 (pf->num_vmdq_msix == 0)) {
6805 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
6806 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6807 }
6808 #ifdef I40E_FCOE
6809
6810 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
6811 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
6812 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
6813 }
6814 #endif
6815 return err;
6816 }
6817
6818 /**
6819 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
6820 * @vsi: the VSI being configured
6821 * @v_idx: index of the vector in the vsi struct
6822 *
6823 * We allocate one q_vector. If allocation fails we return -ENOMEM.
6824 **/
6825 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
6826 {
6827 struct i40e_q_vector *q_vector;
6828
6829 /* allocate q_vector */
6830 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
6831 if (!q_vector)
6832 return -ENOMEM;
6833
6834 q_vector->vsi = vsi;
6835 q_vector->v_idx = v_idx;
6836 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
6837 if (vsi->netdev)
6838 netif_napi_add(vsi->netdev, &q_vector->napi,
6839 i40e_napi_poll, NAPI_POLL_WEIGHT);
6840
6841 q_vector->rx.latency_range = I40E_LOW_LATENCY;
6842 q_vector->tx.latency_range = I40E_LOW_LATENCY;
6843
6844 /* tie q_vector and vsi together */
6845 vsi->q_vectors[v_idx] = q_vector;
6846
6847 return 0;
6848 }
6849
6850 /**
6851 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
6852 * @vsi: the VSI being configured
6853 *
6854 * We allocate one q_vector per queue interrupt. If allocation fails we
6855 * return -ENOMEM.
6856 **/
6857 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
6858 {
6859 struct i40e_pf *pf = vsi->back;
6860 int v_idx, num_q_vectors;
6861 int err;
6862
6863 /* if not MSIX, give the one vector only to the LAN VSI */
6864 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6865 num_q_vectors = vsi->num_q_vectors;
6866 else if (vsi == pf->vsi[pf->lan_vsi])
6867 num_q_vectors = 1;
6868 else
6869 return -EINVAL;
6870
6871 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
6872 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
6873 if (err)
6874 goto err_out;
6875 }
6876
6877 return 0;
6878
6879 err_out:
6880 while (v_idx--)
6881 i40e_free_q_vector(vsi, v_idx);
6882
6883 return err;
6884 }
6885
6886 /**
6887 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
6888 * @pf: board private structure to initialize
6889 **/
6890 static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6891 {
6892 int err = 0;
6893
6894 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
6895 err = i40e_init_msix(pf);
6896 if (err) {
6897 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
6898 #ifdef I40E_FCOE
6899 I40E_FLAG_FCOE_ENABLED |
6900 #endif
6901 I40E_FLAG_RSS_ENABLED |
6902 I40E_FLAG_DCB_CAPABLE |
6903 I40E_FLAG_SRIOV_ENABLED |
6904 I40E_FLAG_FD_SB_ENABLED |
6905 I40E_FLAG_FD_ATR_ENABLED |
6906 I40E_FLAG_VMDQ_ENABLED);
6907
6908 /* rework the queue expectations without MSIX */
6909 i40e_determine_queue_usage(pf);
6910 }
6911 }
6912
6913 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6914 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
6915 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
6916 err = pci_enable_msi(pf->pdev);
6917 if (err) {
6918 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
6919 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
6920 }
6921 }
6922
6923 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
6924 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
6925
6926 /* track first vector for misc interrupts */
6927 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
6928 }
6929
6930 /**
6931 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
6932 * @pf: board private structure
6933 *
6934 * This sets up the handler for MSIX 0, which is used to manage the
6935 * non-queue interrupts, e.g. AdminQ and errors. This is not used
6936 * when in MSI or Legacy interrupt mode.
6937 **/
6938 static int i40e_setup_misc_vector(struct i40e_pf *pf)
6939 {
6940 struct i40e_hw *hw = &pf->hw;
6941 int err = 0;
6942
6943 /* Only request the irq if this is the first time through, and
6944 * not when we're rebuilding after a Reset
6945 */
6946 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6947 err = request_irq(pf->msix_entries[0].vector,
6948 i40e_intr, 0, pf->misc_int_name, pf);
6949 if (err) {
6950 dev_info(&pf->pdev->dev,
6951 "request_irq for %s failed: %d\n",
6952 pf->misc_int_name, err);
6953 return -EFAULT;
6954 }
6955 }
6956
6957 i40e_enable_misc_int_causes(hw);
6958
6959 /* associate no queues to the misc vector */
6960 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
6961 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
6962
6963 i40e_flush(hw);
6964
6965 i40e_irq_dynamic_enable_icr0(pf);
6966
6967 return err;
6968 }
6969
6970 /**
6971 * i40e_config_rss - Prepare for RSS if used
6972 * @pf: board private structure
6973 **/
6974 static int i40e_config_rss(struct i40e_pf *pf)
6975 {
6976 /* Set of random keys generated using kernel random number generator */
6977 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
6978 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
6979 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
6980 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
6981 struct i40e_hw *hw = &pf->hw;
6982 u32 lut = 0;
6983 int i, j;
6984 u64 hena;
6985 u32 reg_val;
6986
6987 /* Fill out hash function seed */
6988 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6989 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
6990
6991 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
6992 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
6993 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
6994 hena |= I40E_DEFAULT_RSS_HENA;
6995 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
6996 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
6997
6998 /* Check capability, then set the table size and register per HW expectation */
6999 reg_val = rd32(hw, I40E_PFQF_CTL_0);
7000 if (hw->func_caps.rss_table_size == 512) {
7001 reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7002 pf->rss_table_size = 512;
7003 } else {
7004 pf->rss_table_size = 128;
7005 reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7006 }
7007 wr32(hw, I40E_PFQF_CTL_0, reg_val);
7008
7009 /* Populate the LUT with max no. of queues in round robin fashion */
7010 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
7011
7012 /* The assumption is that lan qp count will be the highest
7013 * qp count for any PF VSI that needs RSS.
7014 * If multiple VSIs need RSS support, all the qp counts
7015 * for those VSIs should be a power of 2 for RSS to work.
7016 * If LAN VSI is the only consumer for RSS then this requirement
7017 * is not necessary.
7018 */
7019 if (j == pf->rss_size)
7020 j = 0;
7021 /* lut = 4-byte sliding window of 4 lut entries */
7022 lut = (lut << 8) | (j &
7023 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
7024 /* On i = 3, we have 4 entries in lut; write to the register */
7025 if ((i & 3) == 3)
7026 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
7027 }
7028 i40e_flush(hw);
7029
7030 return 0;
7031 }
7032
7033 /**
7034 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7035 * @pf: board private structure
7036 * @queue_count: the requested queue count for rss.
7037 *
7038 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
7039 * count, which may differ from the requested queue count.
7040 **/
7041 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7042 {
7043 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7044 return 0;
7045
7046 queue_count = min_t(int, queue_count, pf->rss_size_max);
7047
7048 if (queue_count != pf->rss_size) {
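/* a new queue count only takes effect after a full prep-for-reset and
 * rebuild cycle, so the queue layout can pick up the new rss_size
 */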
7049 i40e_prep_for_reset(pf);
7050
7051 pf->rss_size = queue_count;
7052
7053 i40e_reset_and_rebuild(pf, true);
7054 i40e_config_rss(pf);
7055 }
7056 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
7057 return pf->rss_size;
7058 }
7059
7060 /**
7061 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7062 * @pf: board private structure to initialize
7063 *
7064 * i40e_sw_init initializes the Adapter private data structure.
7065 * Fields are initialized based on PCI device information and
7066 * OS network device settings (MTU size).
7067 **/
7068 static int i40e_sw_init(struct i40e_pf *pf)
7069 {
7070 int err = 0;
7071 int size;
7072
7073 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7074 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7075 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7076 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7077 if (I40E_DEBUG_USER & debug)
7078 pf->hw.debug_mask = debug;
7079 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7080 I40E_DEFAULT_MSG_ENABLE);
7081 }
7082
7083 /* Set default capability flags */
7084 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7085 I40E_FLAG_MSI_ENABLED |
7086 I40E_FLAG_MSIX_ENABLED |
7087 I40E_FLAG_RX_1BUF_ENABLED;
7088
7089 /* Set default ITR */
7090 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7091 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7092
7093 /* Depending on PF configurations, it is possible that the RSS
7094 * maximum might end up larger than the available queues
7095 */
7096 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
7097 pf->rss_size = 1;
7098 pf->rss_size_max = min_t(int, pf->rss_size_max,
7099 pf->hw.func_caps.num_tx_qp);
7100 if (pf->hw.func_caps.rss) {
7101 pf->flags |= I40E_FLAG_RSS_ENABLED;
7102 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7103 }
7104
7105 /* MFP mode enabled */
7106 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
7107 pf->flags |= I40E_FLAG_MFP_ENABLED;
7108 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7109 }
7110
7111 /* FW/NVM is not yet fixed in this regard */
7112 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7113 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7114 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7115 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7116 /* Setup a counter for fd_atr per pf */
7117 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
7118 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7119 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7120 /* Setup a counter for fd_sb per pf */
7121 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
7122 } else {
7123 dev_info(&pf->pdev->dev,
7124 "Flow Director Sideband mode Disabled in MFP mode\n");
7125 }
7126 pf->fdir_pf_filter_count =
7127 pf->hw.func_caps.fd_filters_guaranteed;
7128 pf->hw.fdir_shared_filter_count =
7129 pf->hw.func_caps.fd_filters_best_effort;
7130 }
7131
7132 if (pf->hw.func_caps.vmdq) {
7133 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7134 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7135 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
7136 }
7137
7138 #ifdef I40E_FCOE
7139 err = i40e_init_pf_fcoe(pf);
7140 if (err)
7141 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7142
7143 #endif /* I40E_FCOE */
7144 #ifdef CONFIG_PCI_IOV
7145 if (pf->hw.func_caps.num_vfs) {
7146 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7147 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7148 pf->num_req_vfs = min_t(int,
7149 pf->hw.func_caps.num_vfs,
7150 I40E_MAX_VF_COUNT);
7151 }
7152 #endif /* CONFIG_PCI_IOV */
7153 pf->eeprom_version = 0xDEAD;
7154 pf->lan_veb = I40E_NO_VEB;
7155 pf->lan_vsi = I40E_NO_VSI;
7156
7157 /* set up queue assignment tracking */
7158 size = sizeof(struct i40e_lump_tracking)
7159 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7160 pf->qp_pile = kzalloc(size, GFP_KERNEL);
7161 if (!pf->qp_pile) {
7162 err = -ENOMEM;
7163 goto sw_init_done;
7164 }
7165 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7166 pf->qp_pile->search_hint = 0;
7167
7168 /* set up vector assignment tracking */
7169 size = sizeof(struct i40e_lump_tracking)
7170 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
7171 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7172 if (!pf->irq_pile) {
7173 kfree(pf->qp_pile);
7174 err = -ENOMEM;
7175 goto sw_init_done;
7176 }
7177 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
7178 pf->irq_pile->search_hint = 0;
7179
7180 pf->tx_timeout_recovery_level = 1;
7181
7182 mutex_init(&pf->switch_mutex);
7183
7184 sw_init_done:
7185 return err;
7186 }
7187
7188 /**
7189 * i40e_set_ntuple - set the ntuple feature flag and take action
7190 * @pf: board private structure to initialize
7191 * @features: the feature set that the stack is suggesting
7192 *
7193 * returns a bool to indicate if reset needs to happen
7194 **/
7195 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7196 {
7197 bool need_reset = false;
7198
7199 /* Check if Flow Director n-tuple support was enabled or disabled. If
7200 * the state changed, we need to reset.
7201 */
7202 if (features & NETIF_F_NTUPLE) {
7203 /* Enable filters and mark for reset */
7204 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7205 need_reset = true;
7206 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7207 } else {
7208 /* turn off filters, mark for reset and clear SW filter list */
7209 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7210 need_reset = true;
7211 i40e_fdir_filter_exit(pf);
7212 }
7213 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7214 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7215 /* reset fd counters */
7216 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7217 pf->fdir_pf_active_filters = 0;
7218 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7219 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7220 /* if ATR was auto disabled it can be re-enabled. */
7221 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7222 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7223 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7224 }
7225 return need_reset;
7226 }
7227
7228 /**
7229 * i40e_set_features - set the netdev feature flags
7230 * @netdev: ptr to the netdev being adjusted
7231 * @features: the feature set that the stack is suggesting
7232 **/
7233 static int i40e_set_features(struct net_device *netdev,
7234 netdev_features_t features)
7235 {
7236 struct i40e_netdev_priv *np = netdev_priv(netdev);
7237 struct i40e_vsi *vsi = np->vsi;
7238 struct i40e_pf *pf = vsi->back;
7239 bool need_reset;
7240
7241 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7242 i40e_vlan_stripping_enable(vsi);
7243 else
7244 i40e_vlan_stripping_disable(vsi);
7245
7246 need_reset = i40e_set_ntuple(pf, features);
7247
7248 if (need_reset)
7249 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
7250
7251 return 0;
7252 }
7253
7254 #ifdef CONFIG_I40E_VXLAN
7255 /**
7256 * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
7257 * @pf: board private structure
7258 * @port: The UDP port to look up
7259 *
7260 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7261 **/
7262 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7263 {
7264 u8 i;
7265
7266 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7267 if (pf->vxlan_ports[i] == port)
7268 return i;
7269 }
7270
7271 return i;
7272 }
7273
7274 /**
7275 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
7276 * @netdev: This physical port's netdev
7277 * @sa_family: Socket Family that VXLAN is notifying us about
7278 * @port: New UDP port number that VXLAN started listening to
7279 **/
7280 static void i40e_add_vxlan_port(struct net_device *netdev,
7281 sa_family_t sa_family, __be16 port)
7282 {
7283 struct i40e_netdev_priv *np = netdev_priv(netdev);
7284 struct i40e_vsi *vsi = np->vsi;
7285 struct i40e_pf *pf = vsi->back;
7286 u8 next_idx;
7287 u8 idx;
7288
7289 if (sa_family == AF_INET6)
7290 return;
7291
7292 idx = i40e_get_vxlan_port_idx(pf, port);
7293
7294 /* Check if port already exists */
7295 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7296 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
7297 return;
7298 }
7299
7300 /* Now check if there is space to add the new port */
7301 next_idx = i40e_get_vxlan_port_idx(pf, 0);
7302
7303 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7304 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
7305 ntohs(port));
7306 return;
7307 }
7308
7309 /* New port: add it and mark its index in the bitmap */
7310 pf->vxlan_ports[next_idx] = port;
7311 pf->pending_vxlan_bitmap |= (1 << next_idx);
7312
7313 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7314 }
7315
7316 /**
7317 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
7318 * @netdev: This physical port's netdev
7319 * @sa_family: Socket Family that VXLAN is notifying us about
7320 * @port: UDP port number that VXLAN stopped listening to
7321 **/
7322 static void i40e_del_vxlan_port(struct net_device *netdev,
7323 sa_family_t sa_family, __be16 port)
7324 {
7325 struct i40e_netdev_priv *np = netdev_priv(netdev);
7326 struct i40e_vsi *vsi = np->vsi;
7327 struct i40e_pf *pf = vsi->back;
7328 u8 idx;
7329
7330 if (sa_family == AF_INET6)
7331 return;
7332
7333 idx = i40e_get_vxlan_port_idx(pf, port);
7334
7335 /* Check if port already exists */
7336 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7337 /* if port exists, set it to 0 (mark for deletion)
7338 * and make it pending
7339 */
7340 pf->vxlan_ports[idx] = 0;
7341
7342 pf->pending_vxlan_bitmap |= (1 << idx);
7343
7344 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7345 } else {
7346 netdev_warn(netdev, "Port %d was not found, not deleting\n",
7347 ntohs(port));
7348 }
7349 }
7350
7351 #endif
7352 static int i40e_get_phys_port_id(struct net_device *netdev,
7353 struct netdev_phys_port_id *ppid)
7354 {
7355 struct i40e_netdev_priv *np = netdev_priv(netdev);
7356 struct i40e_pf *pf = np->vsi->back;
7357 struct i40e_hw *hw = &pf->hw;
7358
7359 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
7360 return -EOPNOTSUPP;
7361
7362 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
7363 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
7364
7365 return 0;
7366 }
7367
7368 #ifdef HAVE_FDB_OPS
7369 #ifdef USE_CONST_DEV_UC_CHAR
7370 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7371 struct net_device *dev,
7372 const unsigned char *addr,
7373 u16 flags)
7374 #else
7375 static int i40e_ndo_fdb_add(struct ndmsg *ndm,
7376 struct net_device *dev,
7377 unsigned char *addr,
7378 u16 flags)
7379 #endif
7380 {
7381 struct i40e_netdev_priv *np = netdev_priv(dev);
7382 struct i40e_pf *pf = np->vsi->back;
7383 int err = 0;
7384
7385 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
7386 return -EOPNOTSUPP;
7387
7388 /* Hardware does not support aging addresses so if a
7389 * ndm_state is given only allow permanent addresses
7390 */
7391 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7392 netdev_info(dev, "FDB only supports static addresses\n");
7393 return -EINVAL;
7394 }
7395
7396 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
7397 err = dev_uc_add_excl(dev, addr);
7398 else if (is_multicast_ether_addr(addr))
7399 err = dev_mc_add_excl(dev, addr);
7400 else
7401 err = -EINVAL;
7402
7403 /* Only return duplicate errors if NLM_F_EXCL is set */
7404 if (err == -EEXIST && !(flags & NLM_F_EXCL))
7405 err = 0;
7406
7407 return err;
7408 }
7409
7410 #ifndef USE_DEFAULT_FDB_DEL_DUMP
7411 #ifdef USE_CONST_DEV_UC_CHAR
7412 static int i40e_ndo_fdb_del(struct ndmsg *ndm,
7413 struct net_device *dev,
7414 const unsigned char *addr)
7415 #else
7416 static int i40e_ndo_fdb_del(struct ndmsg *ndm,
7417 struct net_device *dev,
7418 unsigned char *addr)
7419 #endif
7420 {
7421 struct i40e_netdev_priv *np = netdev_priv(dev);
7422 struct i40e_pf *pf = np->vsi->back;
7423 int err = -EOPNOTSUPP;
7424
7425 if (ndm->ndm_state & NUD_PERMANENT) {
7426 netdev_info(dev, "FDB only supports static addresses\n");
7427 return -EINVAL;
7428 }
7429
7430 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
7431 if (is_unicast_ether_addr(addr))
7432 err = dev_uc_del(dev, addr);
7433 else if (is_multicast_ether_addr(addr))
7434 err = dev_mc_del(dev, addr);
7435 else
7436 err = -EINVAL;
7437 }
7438
7439 return err;
7440 }
7441
7442 static int i40e_ndo_fdb_dump(struct sk_buff *skb,
7443 struct netlink_callback *cb,
7444 struct net_device *dev,
7445 struct net_device *filter_dev,
7446 int idx)
7447 {
7448 struct i40e_netdev_priv *np = netdev_priv(dev);
7449 struct i40e_pf *pf = np->vsi->back;
7450
7451 if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
7452 idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
7453
7454 return idx;
7455 }
7456
7457 #endif /* USE_DEFAULT_FDB_DEL_DUMP */
7458 #endif /* HAVE_FDB_OPS */
7459 static const struct net_device_ops i40e_netdev_ops = {
7460 .ndo_open = i40e_open,
7461 .ndo_stop = i40e_close,
7462 .ndo_start_xmit = i40e_lan_xmit_frame,
7463 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
7464 .ndo_set_rx_mode = i40e_set_rx_mode,
7465 .ndo_validate_addr = eth_validate_addr,
7466 .ndo_set_mac_address = i40e_set_mac,
7467 .ndo_change_mtu = i40e_change_mtu,
7468 .ndo_do_ioctl = i40e_ioctl,
7469 .ndo_tx_timeout = i40e_tx_timeout,
7470 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
7471 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
7472 #ifdef CONFIG_NET_POLL_CONTROLLER
7473 .ndo_poll_controller = i40e_netpoll,
7474 #endif
7475 .ndo_setup_tc = i40e_setup_tc,
7476 #ifdef I40E_FCOE
7477 .ndo_fcoe_enable = i40e_fcoe_enable,
7478 .ndo_fcoe_disable = i40e_fcoe_disable,
7479 #endif
7480 .ndo_set_features = i40e_set_features,
7481 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
7482 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
7483 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
7484 .ndo_get_vf_config = i40e_ndo_get_vf_config,
7485 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
7486 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
7487 #ifdef CONFIG_I40E_VXLAN
7488 .ndo_add_vxlan_port = i40e_add_vxlan_port,
7489 .ndo_del_vxlan_port = i40e_del_vxlan_port,
7490 #endif
7491 .ndo_get_phys_port_id = i40e_get_phys_port_id,
7492 #ifdef HAVE_FDB_OPS
7493 .ndo_fdb_add = i40e_ndo_fdb_add,
7494 #ifndef USE_DEFAULT_FDB_DEL_DUMP
7495 .ndo_fdb_del = i40e_ndo_fdb_del,
7496 .ndo_fdb_dump = i40e_ndo_fdb_dump,
7497 #endif
7498 #endif
7499 };
7500
7501 /**
7502 * i40e_config_netdev - Setup the netdev flags
7503 * @vsi: the VSI being configured
7504 *
7505 * Returns 0 on success, negative value on failure
7506 **/
7507 static int i40e_config_netdev(struct i40e_vsi *vsi)
7508 {
7509 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
7510 struct i40e_pf *pf = vsi->back;
7511 struct i40e_hw *hw = &pf->hw;
7512 struct i40e_netdev_priv *np;
7513 struct net_device *netdev;
7514 u8 mac_addr[ETH_ALEN];
7515 int etherdev_size;
7516
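/* allocate the netdev with room for our private wrapper and one queue per allocated queue pair */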
7517 etherdev_size = sizeof(struct i40e_netdev_priv);
7518 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
7519 if (!netdev)
7520 return -ENOMEM;
7521
7522 vsi->netdev = netdev;
7523 np = netdev_priv(netdev);
7524 np->vsi = vsi;
7525
7526 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
7527 NETIF_F_GSO_UDP_TUNNEL |
7528 NETIF_F_TSO;
7529
7530 netdev->features = NETIF_F_SG |
7531 NETIF_F_IP_CSUM |
7532 NETIF_F_SCTP_CSUM |
7533 NETIF_F_HIGHDMA |
7534 NETIF_F_GSO_UDP_TUNNEL |
7535 NETIF_F_HW_VLAN_CTAG_TX |
7536 NETIF_F_HW_VLAN_CTAG_RX |
7537 NETIF_F_HW_VLAN_CTAG_FILTER |
7538 NETIF_F_IPV6_CSUM |
7539 NETIF_F_TSO |
7540 NETIF_F_TSO_ECN |
7541 NETIF_F_TSO6 |
7542 NETIF_F_RXCSUM |
7543 NETIF_F_RXHASH |
7544 0;
7545
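/* ntuple (Flow Director sideband) filters are not offered when the port is shared in MFP mode */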
7546 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7547 netdev->features |= NETIF_F_NTUPLE;
7548
7549 /* copy netdev features into list of user selectable features */
7550 netdev->hw_features |= netdev->features;
7551
7552 if (vsi->type == I40E_VSI_MAIN) {
7553 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
7554 ether_addr_copy(mac_addr, hw->mac.perm_addr);
7555 /* The following steps are necessary to prevent reception
7556 * of tagged packets - some older NVM configurations load a
7557 * default MAC-VLAN filter that accepts any tagged packet,
7558 * which must be replaced by a normal filter.
7559 */
7560 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
7561 i40e_add_filter(vsi, mac_addr,
7562 I40E_VLAN_ANY, false, true);
7563 } else {
7564 /* relate the VSI_VMDQ name to the VSI_MAIN name */
7565 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
7566 pf->vsi[pf->lan_vsi]->netdev->name);
7567 random_ether_addr(mac_addr);
7568 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
7569 }
7570 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
7571
7572 ether_addr_copy(netdev->dev_addr, mac_addr);
7573 ether_addr_copy(netdev->perm_addr, mac_addr);
7574 /* vlan gets same features (except vlan offload)
7575 * after any tweaks for specific VSI types
7576 */
7577 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
7578 NETIF_F_HW_VLAN_CTAG_RX |
7579 NETIF_F_HW_VLAN_CTAG_FILTER);
7580 netdev->priv_flags |= IFF_UNICAST_FLT;
7581 netdev->priv_flags |= IFF_SUPP_NOFCS;
7582 /* Setup netdev TC information */
7583 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
7584
7585 netdev->netdev_ops = &i40e_netdev_ops;
7586 netdev->watchdog_timeo = 5 * HZ;
7587 i40e_set_ethtool_ops(netdev);
7588 #ifdef I40E_FCOE
7589 i40e_fcoe_config_netdev(netdev, vsi);
7590 #endif
7591
7592 return 0;
7593 }
7594
7595 /**
7596 * i40e_vsi_delete - Delete a VSI from the switch
7597 * @vsi: the VSI being removed
7598 *
7599 * Returns nothing
7600 **/
7601 static void i40e_vsi_delete(struct i40e_vsi *vsi)
7602 {
7603 /* removing the default VSI is not allowed */
7604 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
7605 return;
7606
7607 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
7608 }
7609
7610 /**
7611 * i40e_add_vsi - Add a VSI to the switch
7612 * @vsi: the VSI being configured
7613 *
7614 * This initializes a VSI context depending on the VSI type to be added and
7615 * passes it down to the add_vsi aq command.
7616 **/
7617 static int i40e_add_vsi(struct i40e_vsi *vsi)
7618 {
7619 int ret = -ENODEV;
7620 struct i40e_mac_filter *f, *ftmp;
7621 struct i40e_pf *pf = vsi->back;
7622 struct i40e_hw *hw = &pf->hw;
7623 struct i40e_vsi_context ctxt;
7624 u8 enabled_tc = 0x1; /* TC0 enabled */
7625 int f_count = 0;
7626
7627 memset(&ctxt, 0, sizeof(ctxt));
7628 switch (vsi->type) {
7629 case I40E_VSI_MAIN:
7630 /* The PF's main VSI is already setup as part of the
7631 * device initialization, so we'll not bother with
7632 * the add_vsi call, but we will retrieve the current
7633 * VSI context.
7634 */
7635 ctxt.seid = pf->main_vsi_seid;
7636 ctxt.pf_num = pf->hw.pf_id;
7637 ctxt.vf_num = 0;
7638 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
7639 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
7640 if (ret) {
7641 dev_info(&pf->pdev->dev,
7642 "couldn't get pf vsi config, err %d, aq_err %d\n",
7643 ret, pf->hw.aq.asq_last_status);
7644 return -ENOENT;
7645 }
7646 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7647 vsi->info.valid_sections = 0;
7648
7649 vsi->seid = ctxt.seid;
7650 vsi->id = ctxt.vsi_number;
7651
7652 enabled_tc = i40e_pf_get_tc_map(pf);
7653
7654 /* MFP mode setup queue map and update VSI */
7655 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
7656 memset(&ctxt, 0, sizeof(ctxt));
7657 ctxt.seid = pf->main_vsi_seid;
7658 ctxt.pf_num = pf->hw.pf_id;
7659 ctxt.vf_num = 0;
7660 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
7661 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7662 if (ret) {
7663 dev_info(&pf->pdev->dev,
7664 "update vsi failed, aq_err=%d\n",
7665 pf->hw.aq.asq_last_status);
7666 ret = -ENOENT;
7667 goto err;
7668 }
7669 /* update the local VSI info queue map */
7670 i40e_vsi_update_queue_map(vsi, &ctxt);
7671 vsi->info.valid_sections = 0;
7672 } else {
7673 /* Default/Main VSI is only enabled for TC0;
7674 * reconfigure it to enable all TCs that are
7675 * available on the port in SFP mode.
7676 */
7677 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7678 if (ret) {
7679 dev_info(&pf->pdev->dev,
7680 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
7681 enabled_tc, ret,
7682 pf->hw.aq.asq_last_status);
7683 ret = -ENOENT;
7684 }
7685 }
7686 break;
7687
7688 case I40E_VSI_FDIR:
7689 ctxt.pf_num = hw->pf_id;
7690 ctxt.vf_num = 0;
7691 ctxt.uplink_seid = vsi->uplink_seid;
7692 ctxt.connection_type = 0x1; /* regular data port */
7693 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
7694 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7695 break;
7696
7697 case I40E_VSI_VMDQ2:
7698 ctxt.pf_num = hw->pf_id;
7699 ctxt.vf_num = 0;
7700 ctxt.uplink_seid = vsi->uplink_seid;
7701 ctxt.connection_type = 0x1; /* regular data port */
7702 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
7703
7704 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7705
7706 /* This VSI is connected to VEB so the switch_id
7707 * should be set to zero by default.
7708 */
7709 ctxt.info.switch_id = 0;
7710 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7711
7712 /* Setup the VSI tx/rx queue map for TC0 only for now */
7713 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7714 break;
7715
7716 case I40E_VSI_SRIOV:
7717 ctxt.pf_num = hw->pf_id;
7718 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
7719 ctxt.uplink_seid = vsi->uplink_seid;
7720 ctxt.connection_type = 0x1; /* regular data port */
7721 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
7722
7723 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7724
7725 /* This VSI is connected to VEB so the switch_id
7726 * should be set to zero by default.
7727 */
7728 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7729
7730 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
7731 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
7732 if (pf->vf[vsi->vf_id].spoofchk) {
7733 ctxt.info.valid_sections |=
7734 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7735 ctxt.info.sec_flags |=
7736 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7737 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7738 }
7739 /* Setup the VSI tx/rx queue map for TC0 only for now */
7740 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7741 break;
7742
7743 #ifdef I40E_FCOE
7744 case I40E_VSI_FCOE:
7745 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
7746 if (ret) {
7747 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
7748 return ret;
7749 }
7750 break;
7751
7752 #endif /* I40E_FCOE */
7753 default:
7754 return -ENODEV;
7755 }
7756
7757 if (vsi->type != I40E_VSI_MAIN) {
7758 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
7759 if (ret) {
7760 dev_info(&vsi->back->pdev->dev,
7761 "add vsi failed, aq_err=%d\n",
7762 vsi->back->hw.aq.asq_last_status);
7763 ret = -ENOENT;
7764 goto err;
7765 }
7766 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7767 vsi->info.valid_sections = 0;
7768 vsi->seid = ctxt.seid;
7769 vsi->id = ctxt.vsi_number;
7770 }
7771
7772 /* If macvlan filters already exist, force them to get loaded */
7773 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
7774 f->changed = true;
7775 f_count++;
7776
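/* a locally administered address on the main VSI replaces the default perfect-match filter and is handed to firmware as the LAA/WOL MAC */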
7777 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
7778 struct i40e_aqc_remove_macvlan_element_data element;
7779
7780 memset(&element, 0, sizeof(element));
7781 ether_addr_copy(element.mac_addr, f->macaddr);
7782 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7783 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
7784 &element, 1, NULL);
7785 if (ret) {
7786 /* some older FW has a different default */
7787 element.flags |=
7788 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7789 i40e_aq_remove_macvlan(hw, vsi->seid,
7790 &element, 1, NULL);
7791 }
7792
7793 i40e_aq_mac_address_write(hw,
7794 I40E_AQC_WRITE_TYPE_LAA_WOL,
7795 f->macaddr, NULL);
7796 }
7797 }
7798 if (f_count) {
7799 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
7800 pf->flags |= I40E_FLAG_FILTER_SYNC;
7801 }
7802
7803 /* Update VSI BW information */
7804 ret = i40e_vsi_get_bw_info(vsi);
7805 if (ret) {
7806 dev_info(&pf->pdev->dev,
7807 "couldn't get vsi bw info, err %d, aq_err %d\n",
7808 ret, pf->hw.aq.asq_last_status);
7809 /* VSI is already added so not tearing that up */
7810 ret = 0;
7811 }
7812
7813 err:
7814 return ret;
7815 }
7816
7817 /**
7818 * i40e_vsi_release - Delete a VSI and free its resources
7819 * @vsi: the VSI being removed
7820 *
7821 * Returns 0 on success or < 0 on error
7822 **/
7823 int i40e_vsi_release(struct i40e_vsi *vsi)
7824 {
7825 struct i40e_mac_filter *f, *ftmp;
7826 struct i40e_veb *veb = NULL;
7827 struct i40e_pf *pf;
7828 u16 uplink_seid;
7829 int i, n;
7830
7831 pf = vsi->back;
7832
7833 /* release of a VEB-owner or last VSI is not allowed */
7834 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
7835 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
7836 vsi->seid, vsi->uplink_seid);
7837 return -ENODEV;
7838 }
7839 if (vsi == pf->vsi[pf->lan_vsi] &&
7840 !test_bit(__I40E_DOWN, &pf->state)) {
7841 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
7842 return -ENODEV;
7843 }
7844
7845 uplink_seid = vsi->uplink_seid;
7846 if (vsi->type != I40E_VSI_SRIOV) {
7847 if (vsi->netdev_registered) {
7848 vsi->netdev_registered = false;
7849 if (vsi->netdev) {
7850 /* results in a call to i40e_close() */
7851 unregister_netdev(vsi->netdev);
7852 }
7853 } else {
7854 i40e_vsi_close(vsi);
7855 }
7856 i40e_vsi_disable_irq(vsi);
7857 }
7858
7859 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
7860 i40e_del_filter(vsi, f->macaddr, f->vlan,
7861 f->is_vf, f->is_netdev);
7862 i40e_sync_vsi_filters(vsi);
7863
7864 i40e_vsi_delete(vsi);
7865 i40e_vsi_free_q_vectors(vsi);
7866 if (vsi->netdev) {
7867 free_netdev(vsi->netdev);
7868 vsi->netdev = NULL;
7869 }
7870 i40e_vsi_clear_rings(vsi);
7871 i40e_vsi_clear(vsi);
7872
7873 /* If this was the last thing on the VEB, except for the
7874 * controlling VSI, remove the VEB, which puts the controlling
7875 * VSI onto the next level down in the switch.
7876 *
7877 * Well, okay, there's one more exception here: don't remove
7878 * the orphan VEBs yet. We'll wait for an explicit remove request
7879 * from up the network stack.
7880 */
7881 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
7882 if (pf->vsi[i] &&
7883 pf->vsi[i]->uplink_seid == uplink_seid &&
7884 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7885 n++; /* count the VSIs */
7886 }
7887 }
7888 for (i = 0; i < I40E_MAX_VEB; i++) {
7889 if (!pf->veb[i])
7890 continue;
7891 if (pf->veb[i]->uplink_seid == uplink_seid)
7892 n++; /* count the VEBs */
7893 if (pf->veb[i]->seid == uplink_seid)
7894 veb = pf->veb[i];
7895 }
7896 if (n == 0 && veb && veb->uplink_seid != 0)
7897 i40e_veb_release(veb);
7898
7899 return 0;
7900 }
7901
7902 /**
7903 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
7904 * @vsi: ptr to the VSI
7905 *
7906 * This should only be called after i40e_vsi_mem_alloc() which allocates the
7907 * corresponding SW VSI structure and initializes num_queue_pairs for the
7908 * newly allocated VSI.
7909 *
7910 * Returns 0 on success or negative on failure
7911 **/
7912 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
7913 {
7914 int ret = -ENOENT;
7915 struct i40e_pf *pf = vsi->back;
7916
7917 if (vsi->q_vectors[0]) {
7918 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
7919 vsi->seid);
7920 return -EEXIST;
7921 }
7922
7923 if (vsi->base_vector) {
7924 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
7925 vsi->seid, vsi->base_vector);
7926 return -EEXIST;
7927 }
7928
7929 ret = i40e_vsi_alloc_q_vectors(vsi);
7930 if (ret) {
7931 dev_info(&pf->pdev->dev,
7932 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
7933 vsi->num_q_vectors, vsi->seid, ret);
7934 vsi->num_q_vectors = 0;
7935 goto vector_setup_out;
7936 }
7937
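/* reserve a contiguous block of interrupt vectors for this VSI from the PF's irq_pile */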
7938 if (vsi->num_q_vectors)
7939 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
7940 vsi->num_q_vectors, vsi->idx);
7941 if (vsi->base_vector < 0) {
7942 dev_info(&pf->pdev->dev,
7943 "failed to get queue tracking for VSI %d, err=%d\n",
7944 vsi->seid, vsi->base_vector);
7945 i40e_vsi_free_q_vectors(vsi);
7946 ret = -ENOENT;
7947 goto vector_setup_out;
7948 }
7949
7950 vector_setup_out:
7951 return ret;
7952 }
7953
7954 /**
7955 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
7956 * @vsi: pointer to the vsi.
7957 *
7958 * This re-allocates a vsi's queue resources.
7959 *
7960 * Returns pointer to the successfully allocated and configured VSI sw struct
7961 * on success, otherwise returns NULL on failure.
7962 **/
7963 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
7964 {
7965 struct i40e_pf *pf = vsi->back;
7966 u8 enabled_tc;
7967 int ret;
7968
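/* return the old queue block to the pile and free the rings before resizing the VSI */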
7969 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7970 i40e_vsi_clear_rings(vsi);
7971
7972 i40e_vsi_free_arrays(vsi, false);
7973 i40e_set_num_rings_in_vsi(vsi);
7974 ret = i40e_vsi_alloc_arrays(vsi, false);
7975 if (ret)
7976 goto err_vsi;
7977
7978 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
7979 if (ret < 0) {
7980 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7981 vsi->seid, ret);
7982 goto err_vsi;
7983 }
7984 vsi->base_queue = ret;
7985
7986 /* Update the FW view of the VSI. Force a reset of TC and queue
7987 * layout configurations.
7988 */
7989 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7990 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7991 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7992 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7993
7994 /* assign it some queues */
7995 ret = i40e_alloc_rings(vsi);
7996 if (ret)
7997 goto err_rings;
7998
7999 /* map all of the rings to the q_vectors */
8000 i40e_vsi_map_rings_to_vectors(vsi);
8001 return vsi;
8002
8003 err_rings:
8004 i40e_vsi_free_q_vectors(vsi);
8005 if (vsi->netdev_registered) {
8006 vsi->netdev_registered = false;
8007 unregister_netdev(vsi->netdev);
8008 free_netdev(vsi->netdev);
8009 vsi->netdev = NULL;
8010 }
8011 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8012 err_vsi:
8013 i40e_vsi_clear(vsi);
8014 return NULL;
8015 }
8016
8017 /**
8018 * i40e_vsi_setup - Set up a VSI by a given type
8019 * @pf: board private structure
8020 * @type: VSI type
8021 * @uplink_seid: the switch element to link to
8022 * @param1: usage depends upon VSI type. For VF types, indicates VF id
8023 *
8024 * This allocates the sw VSI structure and its queue resources, then adds a VSI
8025 * to the identified VEB.
8026 *
8027 * Returns pointer to the successfully allocated and configured VSI sw struct on
8028 * success, otherwise returns NULL on failure.
8029 **/
8030 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8031 u16 uplink_seid, u32 param1)
8032 {
8033 struct i40e_vsi *vsi = NULL;
8034 struct i40e_veb *veb = NULL;
8035 int ret, i;
8036 int v_idx;
8037
8038 /* The requested uplink_seid must be either
8039 * - the PF's port seid
8040 * no VEB is needed because this is the PF
8041 * or this is a Flow Director special case VSI
8042 * - seid of an existing VEB
8043 * - seid of a VSI that owns an existing VEB
8044 * - seid of a VSI that doesn't own a VEB
8045 * a new VEB is created and the VSI becomes the owner
8046 * - seid of the PF VSI, which is what creates the first VEB
8047 * this is a special case of the previous
8048 *
8049 * Find which uplink_seid we were given and create a new VEB if needed
8050 */
8051 for (i = 0; i < I40E_MAX_VEB; i++) {
8052 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8053 veb = pf->veb[i];
8054 break;
8055 }
8056 }
8057
8058 if (!veb && uplink_seid != pf->mac_seid) {
8059
8060 for (i = 0; i < pf->num_alloc_vsi; i++) {
8061 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8062 vsi = pf->vsi[i];
8063 break;
8064 }
8065 }
8066 if (!vsi) {
8067 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8068 uplink_seid);
8069 return NULL;
8070 }
8071
8072 if (vsi->uplink_seid == pf->mac_seid)
8073 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8074 vsi->tc_config.enabled_tc);
8075 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8076 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8077 vsi->tc_config.enabled_tc);
8078
8079 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8080 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8081 veb = pf->veb[i];
8082 }
8083 if (!veb) {
8084 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
8085 return NULL;
8086 }
8087
8088 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8089 uplink_seid = veb->seid;
8090 }
8091
8092 /* get vsi sw struct */
8093 v_idx = i40e_vsi_mem_alloc(pf, type);
8094 if (v_idx < 0)
8095 goto err_alloc;
8096 vsi = pf->vsi[v_idx];
8097 if (!vsi)
8098 goto err_alloc;
8099 vsi->type = type;
8100 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
8101
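/* remember the PF's LAN VSI index, or record which VF owns an SRIOV VSI */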
8102 if (type == I40E_VSI_MAIN)
8103 pf->lan_vsi = v_idx;
8104 else if (type == I40E_VSI_SRIOV)
8105 vsi->vf_id = param1;
8106 /* assign it some queues */
8107 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
8108 vsi->idx);
8109 if (ret < 0) {
8110 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
8111 vsi->seid, ret);
8112 goto err_vsi;
8113 }
8114 vsi->base_queue = ret;
8115
8116 /* get a VSI from the hardware */
8117 vsi->uplink_seid = uplink_seid;
8118 ret = i40e_add_vsi(vsi);
8119 if (ret)
8120 goto err_vsi;
8121
8122 switch (vsi->type) {
8123 /* setup the netdev if needed */
8124 case I40E_VSI_MAIN:
8125 case I40E_VSI_VMDQ2:
8126 case I40E_VSI_FCOE:
8127 ret = i40e_config_netdev(vsi);
8128 if (ret)
8129 goto err_netdev;
8130 ret = register_netdev(vsi->netdev);
8131 if (ret)
8132 goto err_netdev;
8133 vsi->netdev_registered = true;
8134 netif_carrier_off(vsi->netdev);
8135 #ifdef CONFIG_I40E_DCB
8136 /* Setup DCB netlink interface */
8137 i40e_dcbnl_setup(vsi);
8138 #endif /* CONFIG_I40E_DCB */
8139 /* fall through */
8140
8141 case I40E_VSI_FDIR:
8142 /* set up vectors and rings if needed */
8143 ret = i40e_vsi_setup_vectors(vsi);
8144 if (ret)
8145 goto err_msix;
8146
8147 ret = i40e_alloc_rings(vsi);
8148 if (ret)
8149 goto err_rings;
8150
8151 /* map all of the rings to the q_vectors */
8152 i40e_vsi_map_rings_to_vectors(vsi);
8153
8154 i40e_vsi_reset_stats(vsi);
8155 break;
8156
8157 default:
8158 /* no netdev or rings for the other VSI types */
8159 break;
8160 }
8161
8162 return vsi;
8163
8164 err_rings:
8165 i40e_vsi_free_q_vectors(vsi);
8166 err_msix:
8167 if (vsi->netdev_registered) {
8168 vsi->netdev_registered = false;
8169 unregister_netdev(vsi->netdev);
8170 free_netdev(vsi->netdev);
8171 vsi->netdev = NULL;
8172 }
8173 err_netdev:
8174 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8175 err_vsi:
8176 i40e_vsi_clear(vsi);
8177 err_alloc:
8178 return NULL;
8179 }
8180
8181 /**
8182 * i40e_veb_get_bw_info - Query VEB BW information
8183 * @veb: the veb to query
8184 *
8185 * Query the Tx scheduler BW configuration data for given VEB
8186 **/
8187 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
8188 {
8189 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
8190 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
8191 struct i40e_pf *pf = veb->pf;
8192 struct i40e_hw *hw = &pf->hw;
8193 u32 tc_bw_max;
8194 int ret = 0;
8195 int i;
8196
8197 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
8198 &bw_data, NULL);
8199 if (ret) {
8200 dev_info(&pf->pdev->dev,
8201 "query veb bw config failed, aq_err=%d\n",
8202 hw->aq.asq_last_status);
8203 goto out;
8204 }
8205
8206 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
8207 &ets_data, NULL);
8208 if (ret) {
8209 dev_info(&pf->pdev->dev,
8210 "query veb bw ets config failed, aq_err=%d\n",
8211 hw->aq.asq_last_status);
8212 goto out;
8213 }
8214
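/* save the ETS and BW settings; tc_bw_max packs a small per-TC max quanta field across two 16-bit words */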
8215 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
8216 veb->bw_max_quanta = ets_data.tc_bw_max;
8217 veb->is_abs_credits = bw_data.absolute_credits_enable;
8218 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
8219 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
8220 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8221 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
8222 veb->bw_tc_limit_credits[i] =
8223 le16_to_cpu(bw_data.tc_bw_limits[i]);
8224 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
8225 }
8226
8227 out:
8228 return ret;
8229 }
8230
8231 /**
8232 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8233 * @pf: board private structure
8234 *
8235 * On error: returns error code (negative)
8236 * On success: returns veb index in PF (positive)
8237 **/
8238 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8239 {
8240 int ret = -ENOENT;
8241 struct i40e_veb *veb;
8242 int i;
8243
8244 /* Need to protect the allocation of switch elements at the PF level */
8245 mutex_lock(&pf->switch_mutex);
8246
8247 /* VEB list may be fragmented if VEB creation/destruction has
8248 * been happening. We can afford to do a quick scan to look
8249 * for any free slots in the list.
8250 *
8251 * find next empty veb slot, looping back around if necessary
8252 */
8253 i = 0;
8254 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8255 i++;
8256 if (i >= I40E_MAX_VEB) {
8257 ret = -ENOMEM;
8258 goto err_alloc_veb; /* out of VEB slots! */
8259 }
8260
8261 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8262 if (!veb) {
8263 ret = -ENOMEM;
8264 goto err_alloc_veb;
8265 }
8266 veb->pf = pf;
8267 veb->idx = i;
8268 veb->enabled_tc = 1;
8269
8270 pf->veb[i] = veb;
8271 ret = i;
8272 err_alloc_veb:
8273 mutex_unlock(&pf->switch_mutex);
8274 return ret;
8275 }
8276
8277 /**
8278 * i40e_switch_branch_release - Delete a branch of the switch tree
8279 * @branch: where to start deleting
8280 *
8281 * This uses recursion to find the tips of the branch to be
8282 * removed, deleting them until we get back to and can delete this VEB.
8283 **/
8284 static void i40e_switch_branch_release(struct i40e_veb *branch)
8285 {
8286 struct i40e_pf *pf = branch->pf;
8287 u16 branch_seid = branch->seid;
8288 u16 veb_idx = branch->idx;
8289 int i;
8290
8291 /* release any VEBs on this VEB - RECURSION */
8292 for (i = 0; i < I40E_MAX_VEB; i++) {
8293 if (!pf->veb[i])
8294 continue;
8295 if (pf->veb[i]->uplink_seid == branch->seid)
8296 i40e_switch_branch_release(pf->veb[i]);
8297 }
8298
8299 /* Release the VSIs on this VEB, but not the owner VSI.
8300 *
8301 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
8302 * the VEB itself, so don't use (*branch) after this loop.
8303 */
8304 for (i = 0; i < pf->num_alloc_vsi; i++) {
8305 if (!pf->vsi[i])
8306 continue;
8307 if (pf->vsi[i]->uplink_seid == branch_seid &&
8308 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8309 i40e_vsi_release(pf->vsi[i]);
8310 }
8311 }
8312
8313 /* There's one corner case where the VEB might not have been
8314 * removed, so double check it here and remove it if needed.
8315 * This case happens if the veb was created from the debugfs
8316 * commands and no VSIs were added to it.
8317 */
8318 if (pf->veb[veb_idx])
8319 i40e_veb_release(pf->veb[veb_idx]);
8320 }
8321
8322 /**
8323 * i40e_veb_clear - remove veb struct
8324 * @veb: the veb to remove
8325 **/
8326 static void i40e_veb_clear(struct i40e_veb *veb)
8327 {
8328 if (!veb)
8329 return;
8330
8331 if (veb->pf) {
8332 struct i40e_pf *pf = veb->pf;
8333
8334 mutex_lock(&pf->switch_mutex);
8335 if (pf->veb[veb->idx] == veb)
8336 pf->veb[veb->idx] = NULL;
8337 mutex_unlock(&pf->switch_mutex);
8338 }
8339
8340 kfree(veb);
8341 }
8342
8343 /**
8344 * i40e_veb_release - Delete a VEB and free its resources
8345 * @veb: the VEB being removed
8346 **/
8347 void i40e_veb_release(struct i40e_veb *veb)
8348 {
8349 struct i40e_vsi *vsi = NULL;
8350 struct i40e_pf *pf;
8351 int i, n = 0;
8352
8353 pf = veb->pf;
8354
8355 /* find the remaining VSI and check for extras */
8356 for (i = 0; i < pf->num_alloc_vsi; i++) {
8357 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
8358 n++;
8359 vsi = pf->vsi[i];
8360 }
8361 }
8362 if (n != 1) {
8363 dev_info(&pf->pdev->dev,
8364 "can't remove VEB %d with %d VSIs left\n",
8365 veb->seid, n);
8366 return;
8367 }
8368
8369 /* move the remaining VSI to uplink veb */
8370 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
8371 if (veb->uplink_seid) {
8372 vsi->uplink_seid = veb->uplink_seid;
8373 if (veb->uplink_seid == pf->mac_seid)
8374 vsi->veb_idx = I40E_NO_VEB;
8375 else
8376 vsi->veb_idx = veb->veb_idx;
8377 } else {
8378 /* floating VEB */
8379 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8380 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
8381 }
8382
8383 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
8384 i40e_veb_clear(veb);
8385 }
8386
8387 /**
8388 * i40e_add_veb - create the VEB in the switch
8389 * @veb: the VEB to be instantiated
8390 * @vsi: the controlling VSI
8391 **/
8392 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
8393 {
8394 bool is_default = false;
8395 bool is_cloud = false;
8396 int ret;
8397
8398 /* get a VEB from the hardware */
8399 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
8400 veb->enabled_tc, is_default,
8401 is_cloud, &veb->seid, NULL);
8402 if (ret) {
8403 dev_info(&veb->pf->pdev->dev,
8404 "couldn't add VEB, err %d, aq_err %d\n",
8405 ret, veb->pf->hw.aq.asq_last_status);
8406 return -EPERM;
8407 }
8408
8409 /* get statistics counter */
8410 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
8411 &veb->stats_idx, NULL, NULL, NULL);
8412 if (ret) {
8413 dev_info(&veb->pf->pdev->dev,
8414 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
8415 ret, veb->pf->hw.aq.asq_last_status);
8416 return -EPERM;
8417 }
8418 ret = i40e_veb_get_bw_info(veb);
8419 if (ret) {
8420 dev_info(&veb->pf->pdev->dev,
8421 "couldn't get VEB bw info, err %d, aq_err %d\n",
8422 ret, veb->pf->hw.aq.asq_last_status);
8423 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
8424 return -ENOENT;
8425 }
8426
8427 vsi->uplink_seid = veb->seid;
8428 vsi->veb_idx = veb->idx;
8429 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8430
8431 return 0;
8432 }
8433
8434 /**
8435 * i40e_veb_setup - Set up a VEB
8436 * @pf: board private structure
8437 * @flags: VEB setup flags
8438 * @uplink_seid: the switch element to link to
8439 * @vsi_seid: the initial VSI seid
8440 * @enabled_tc: Enabled TC bit-map
8441 *
8442 * This allocates the sw VEB structure and links it into the switch.
8443 * It is possible and legal for this to be a duplicate of an already
8444 * existing VEB. It is also possible for both uplink and vsi seids
8445 * to be zero, in order to create a floating VEB.
8446 *
8447 * Returns pointer to the successfully allocated VEB sw struct on
8448 * success, otherwise returns NULL on failure.
8449 **/
8450 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
8451 u16 uplink_seid, u16 vsi_seid,
8452 u8 enabled_tc)
8453 {
8454 struct i40e_veb *veb, *uplink_veb = NULL;
8455 int vsi_idx, veb_idx;
8456 int ret;
8457
8458 /* if one seid is 0, the other must be 0 to create a floating relay */
8459 if ((uplink_seid == 0 || vsi_seid == 0) &&
8460 (uplink_seid + vsi_seid != 0)) {
8461 dev_info(&pf->pdev->dev,
8462 "one, not both seid's are 0: uplink=%d vsi=%d\n",
8463 uplink_seid, vsi_seid);
8464 return NULL;
8465 }
8466
8467 /* make sure there is such a vsi and uplink */
8468 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
8469 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
8470 break;
8471 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
8472 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
8473 vsi_seid);
8474 return NULL;
8475 }
8476
8477 if (uplink_seid && uplink_seid != pf->mac_seid) {
8478 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8479 if (pf->veb[veb_idx] &&
8480 pf->veb[veb_idx]->seid == uplink_seid) {
8481 uplink_veb = pf->veb[veb_idx];
8482 break;
8483 }
8484 }
8485 if (!uplink_veb) {
8486 dev_info(&pf->pdev->dev,
8487 "uplink seid %d not found\n", uplink_seid);
8488 return NULL;
8489 }
8490 }
8491
8492 /* get veb sw struct */
8493 veb_idx = i40e_veb_mem_alloc(pf);
8494 if (veb_idx < 0)
8495 goto err_alloc;
8496 veb = pf->veb[veb_idx];
8497 veb->flags = flags;
8498 veb->uplink_seid = uplink_seid;
8499 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
8500 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
8501
8502 /* create the VEB in the switch */
8503 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
8504 if (ret)
8505 goto err_veb;
8506 if (vsi_idx == pf->lan_vsi)
8507 pf->lan_veb = veb->idx;
8508
8509 return veb;
8510
8511 err_veb:
8512 i40e_veb_clear(veb);
8513 err_alloc:
8514 return NULL;
8515 }
8516
8517 /**
8518 * i40e_setup_pf_switch_element - set pf vars based on switch type
8519 * @pf: board private structure
8520 * @ele: element we are building info from
8521 * @num_reported: total number of elements
8522 * @printconfig: should we print the contents
8523 *
8524 * helper function to assist in extracting a few useful SEID values.
8525 **/
8526 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
8527 struct i40e_aqc_switch_config_element_resp *ele,
8528 u16 num_reported, bool printconfig)
8529 {
8530 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
8531 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
8532 u8 element_type = ele->element_type;
8533 u16 seid = le16_to_cpu(ele->seid);
8534
8535 if (printconfig)
8536 dev_info(&pf->pdev->dev,
8537 "type=%d seid=%d uplink=%d downlink=%d\n",
8538 element_type, seid, uplink_seid, downlink_seid);
8539
8540 switch (element_type) {
8541 case I40E_SWITCH_ELEMENT_TYPE_MAC:
8542 pf->mac_seid = seid;
8543 break;
8544 case I40E_SWITCH_ELEMENT_TYPE_VEB:
8545 /* Main VEB? */
8546 if (uplink_seid != pf->mac_seid)
8547 break;
8548 if (pf->lan_veb == I40E_NO_VEB) {
8549 int v;
8550
8551 /* find existing or else empty VEB */
8552 for (v = 0; v < I40E_MAX_VEB; v++) {
8553 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
8554 pf->lan_veb = v;
8555 break;
8556 }
8557 }
8558 if (pf->lan_veb == I40E_NO_VEB) {
8559 v = i40e_veb_mem_alloc(pf);
8560 if (v < 0)
8561 break;
8562 pf->lan_veb = v;
8563 }
8564 }
8565
8566 pf->veb[pf->lan_veb]->seid = seid;
8567 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
8568 pf->veb[pf->lan_veb]->pf = pf;
8569 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
8570 break;
8571 case I40E_SWITCH_ELEMENT_TYPE_VSI:
8572 if (num_reported != 1)
8573 break;
8574 /* This is immediately after a reset so we can assume this is
8575 * the PF's VSI
8576 */
8577 pf->mac_seid = uplink_seid;
8578 pf->pf_seid = downlink_seid;
8579 pf->main_vsi_seid = seid;
8580 if (printconfig)
8581 dev_info(&pf->pdev->dev,
8582 "pf_seid=%d main_vsi_seid=%d\n",
8583 pf->pf_seid, pf->main_vsi_seid);
8584 break;
8585 case I40E_SWITCH_ELEMENT_TYPE_PF:
8586 case I40E_SWITCH_ELEMENT_TYPE_VF:
8587 case I40E_SWITCH_ELEMENT_TYPE_EMP:
8588 case I40E_SWITCH_ELEMENT_TYPE_BMC:
8589 case I40E_SWITCH_ELEMENT_TYPE_PE:
8590 case I40E_SWITCH_ELEMENT_TYPE_PA:
8591 /* ignore these for now */
8592 break;
8593 default:
8594 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
8595 element_type, seid);
8596 break;
8597 }
8598 }
8599
8600 /**
8601 * i40e_fetch_switch_configuration - Get switch config from firmware
8602 * @pf: board private structure
8603 * @printconfig: should we print the contents
8604 *
8605 * Get the current switch configuration from the device and
8606 * extract a few useful SEID values.
8607 **/
8608 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8609 {
8610 struct i40e_aqc_get_switch_config_resp *sw_config;
8611 u16 next_seid = 0;
8612 int ret = 0;
8613 u8 *aq_buf;
8614 int i;
8615
8616 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8617 if (!aq_buf)
8618 return -ENOMEM;
8619
8620 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
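/* the switch configuration can span multiple AQ responses; next_seid of 0 marks the last chunk */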
8621 do {
8622 u16 num_reported, num_total;
8623
8624 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8625 I40E_AQ_LARGE_BUF,
8626 &next_seid, NULL);
8627 if (ret) {
8628 dev_info(&pf->pdev->dev,
8629 "get switch config failed %d aq_err=%x\n",
8630 ret, pf->hw.aq.asq_last_status);
8631 kfree(aq_buf);
8632 return -ENOENT;
8633 }
8634
8635 num_reported = le16_to_cpu(sw_config->header.num_reported);
8636 num_total = le16_to_cpu(sw_config->header.num_total);
8637
8638 if (printconfig)
8639 dev_info(&pf->pdev->dev,
8640 "header: %d reported %d total\n",
8641 num_reported, num_total);
8642
8643 for (i = 0; i < num_reported; i++) {
8644 struct i40e_aqc_switch_config_element_resp *ele =
8645 &sw_config->element[i];
8646
8647 i40e_setup_pf_switch_element(pf, ele, num_reported,
8648 printconfig);
8649 }
8650 } while (next_seid != 0);
8651
8652 kfree(aq_buf);
8653 return ret;
8654 }
8655
8656 /**
8657 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8658 * @pf: board private structure
8659 * @reinit: if the Main VSI needs to be re-initialized.
8660 *
8661 * Returns 0 on success, negative value on failure
8662 **/
8663 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
8664 {
8665 int ret;
8666
8667 /* find out what's out there already */
8668 ret = i40e_fetch_switch_configuration(pf, false);
8669 if (ret) {
8670 dev_info(&pf->pdev->dev,
8671 "couldn't fetch switch config, err %d, aq_err %d\n",
8672 ret, pf->hw.aq.asq_last_status);
8673 return ret;
8674 }
8675 i40e_pf_reset_stats(pf);
8676
8677 /* first time setup */
8678 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
8679 struct i40e_vsi *vsi = NULL;
8680 u16 uplink_seid;
8681
8682 /* Set up the PF VSI associated with the PF's main VSI
8683 * that is already in the HW switch
8684 */
8685 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8686 uplink_seid = pf->veb[pf->lan_veb]->seid;
8687 else
8688 uplink_seid = pf->mac_seid;
8689 if (pf->lan_vsi == I40E_NO_VSI)
8690 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8691 else if (reinit)
8692 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
8693 if (!vsi) {
8694 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8695 i40e_fdir_teardown(pf);
8696 return -EAGAIN;
8697 }
8698 } else {
8699 /* force a reset of TC and queue layout configurations */
8700 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8701 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8702 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8703 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8704 }
8705 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8706
8707 i40e_fdir_sb_setup(pf);
8708
8709 /* Setup static PF queue filter control settings */
8710 ret = i40e_setup_pf_filter_control(pf);
8711 if (ret) {
8712 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8713 ret);
8714 /* Failure here should not prevent the remaining setup steps */
8715 }
8716
8717 /* enable RSS in the HW, even for only one queue, as the stack can use
8718 * the hash
8719 */
8720 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8721 i40e_config_rss(pf);
8722
8723 /* fill in link information and enable LSE reporting */
8724 i40e_update_link_info(&pf->hw, true);
8725 i40e_link_event(pf);
8726
8727 /* Initialize user-specific link properties */
8728 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8729 I40E_AQ_AN_COMPLETED) ? true : false);
8730
8731 i40e_ptp_init(pf);
8732
8733 return ret;
8734 }
8735
8736 /**
8737 * i40e_determine_queue_usage - Work out queue distribution
8738 * @pf: board private structure
8739 **/
8740 static void i40e_determine_queue_usage(struct i40e_pf *pf)
8741 {
8742 int queues_left;
8743
8744 pf->num_lan_qps = 0;
8745 #ifdef I40E_FCOE
8746 pf->num_fcoe_qps = 0;
8747 #endif
8748
8749 /* Find the max queues to be put into basic use. We'll always be
8750 * using TC0, whether or not DCB is running, and TC0 will get the
8751 * big RSS set.
8752 */
8753 queues_left = pf->hw.func_caps.num_tx_qp;
8754
8755 if ((queues_left == 1) ||
8756 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8757 /* one qp for PF, no queues for anything else */
8758 queues_left = 0;
8759 pf->rss_size = pf->num_lan_qps = 1;
8760
8761 /* make sure all the fancies are disabled */
8762 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8763 #ifdef I40E_FCOE
8764 I40E_FLAG_FCOE_ENABLED |
8765 #endif
8766 I40E_FLAG_FD_SB_ENABLED |
8767 I40E_FLAG_FD_ATR_ENABLED |
8768 I40E_FLAG_DCB_CAPABLE |
8769 I40E_FLAG_SRIOV_ENABLED |
8770 I40E_FLAG_VMDQ_ENABLED);
8771 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8772 I40E_FLAG_FD_SB_ENABLED |
8773 I40E_FLAG_FD_ATR_ENABLED |
8774 I40E_FLAG_DCB_CAPABLE))) {
8775 /* one qp for PF */
8776 pf->rss_size = pf->num_lan_qps = 1;
8777 queues_left -= pf->num_lan_qps;
8778
8779 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8780 #ifdef I40E_FCOE
8781 I40E_FLAG_FCOE_ENABLED |
8782 #endif
8783 I40E_FLAG_FD_SB_ENABLED |
8784 I40E_FLAG_FD_ATR_ENABLED |
8785 I40E_FLAG_DCB_ENABLED |
8786 I40E_FLAG_VMDQ_ENABLED);
8787 } else {
8788 /* Not enough queues for all TCs */
8789 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
8790 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
8791 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
8792 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
8793 }
8794 pf->num_lan_qps = pf->rss_size_max;
8795 queues_left -= pf->num_lan_qps;
8796 }
8797
8798 #ifdef I40E_FCOE
8799 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
8800 if (I40E_DEFAULT_FCOE <= queues_left) {
8801 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
8802 } else if (I40E_MINIMUM_FCOE <= queues_left) {
8803 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
8804 } else {
8805 pf->num_fcoe_qps = 0;
8806 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
8807 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
8808 }
8809
8810 queues_left -= pf->num_fcoe_qps;
8811 }
8812
8813 #endif
8814 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8815 if (queues_left > 1) {
8816 queues_left -= 1; /* save 1 queue for FD */
8817 } else {
8818 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8819 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
8820 }
8821 }
8822
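/* split whatever is left after LAN/FCoE/FD between the requested VFs and then the VMDq VSIs */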
8823 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8824 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
8825 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
8826 (queues_left / pf->num_vf_qps));
8827 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
8828 }
8829
8830 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8831 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
8832 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
8833 (queues_left / pf->num_vmdq_qps));
8834 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
8835 }
8836
8837 pf->queues_left = queues_left;
8838 #ifdef I40E_FCOE
8839 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
8840 #endif
8841 }
8842
8843 /**
8844 * i40e_setup_pf_filter_control - Setup PF static filter control
8845 * @pf: PF to be setup
8846 *
8847 * i40e_setup_pf_filter_control sets up a pf's initial filter control
8848 * settings. If PE/FCoE are enabled then it will also set the per-PF
8849 * filter sizes required for them. It also enables Flow Director,
8850 * ethertype and macvlan type filter settings for the pf.
8851 *
8852 * Returns 0 on success, negative on failure
8853 **/
8854 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
8855 {
8856 struct i40e_filter_control_settings *settings = &pf->filter_settings;
8857
8858 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
8859
8860 /* Flow Director is enabled */
8861 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
8862 settings->enable_fdir = true;
8863
8864 /* Ethtype and MACVLAN filters enabled for PF */
8865 settings->enable_ethtype = true;
8866 settings->enable_macvlan = true;
8867
8868 if (i40e_set_filter_control(&pf->hw, settings))
8869 return -ENOENT;
8870
8871 return 0;
8872 }
8873
8874 #define INFO_STRING_LEN 255
8875 static void i40e_print_features(struct i40e_pf *pf)
8876 {
8877 struct i40e_hw *hw = &pf->hw;
8878 char *buf, *string;
8879
8880 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8881 if (!string) {
8882 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8883 return;
8884 }
8885
8886 buf = string;
8887
8888 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8889 #ifdef CONFIG_PCI_IOV
8890 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8891 #endif
8892 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8893 pf->vsi[pf->lan_vsi]->num_queue_pairs);
8894
8895 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8896 buf += sprintf(buf, "RSS ");
8897 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8898 buf += sprintf(buf, "FD_ATR ");
8899 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8900 buf += sprintf(buf, "FD_SB ");
8901 buf += sprintf(buf, "NTUPLE ");
8902 }
8903 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
8904 buf += sprintf(buf, "DCB ");
8905 if (pf->flags & I40E_FLAG_PTP)
8906 buf += sprintf(buf, "PTP ");
8907 #ifdef I40E_FCOE
8908 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
8909 buf += sprintf(buf, "FCOE ");
8910 #endif
8911
8912 BUG_ON(buf > (string + INFO_STRING_LEN));
8913 dev_info(&pf->pdev->dev, "%s\n", string);
8914 kfree(string);
8915 }
8916
8917 /**
8918 * i40e_probe - Device initialization routine
8919 * @pdev: PCI device information struct
8920 * @ent: entry in i40e_pci_tbl
8921 *
8922 * i40e_probe initializes a pf identified by a pci_dev structure.
8923 * The OS initialization, configuring of the pf private structure,
8924 * and a hardware reset occur.
8925 *
8926 * Returns 0 on success, negative on failure
8927 **/
8928 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8929 {
8930 struct i40e_pf *pf;
8931 struct i40e_hw *hw;
8932 static u16 pfs_found;
8933 u16 link_status;
8934 int err = 0;
8935 u32 len;
8936 u32 i;
8937
8938 err = pci_enable_device_mem(pdev);
8939 if (err)
8940 return err;
8941
8942 /* set up for high or low dma */
8943 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8944 if (err) {
8945 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8946 if (err) {
8947 dev_err(&pdev->dev,
8948 "DMA configuration failed: 0x%x\n", err);
8949 goto err_dma;
8950 }
8951 }
8952
8953 /* set up pci connections */
8954 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8955 IORESOURCE_MEM), i40e_driver_name);
8956 if (err) {
8957 dev_info(&pdev->dev,
8958 "pci_request_selected_regions failed %d\n", err);
8959 goto err_pci_reg;
8960 }
8961
8962 pci_enable_pcie_error_reporting(pdev);
8963 pci_set_master(pdev);
8964
8965 /* Now that we have a PCI connection, we need to do the
8966 * low level device setup. This is primarily setting up
8967 * the Admin Queue structures and then querying for the
8968 * device's current profile information.
8969 */
8970 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
8971 if (!pf) {
8972 err = -ENOMEM;
8973 goto err_pf_alloc;
8974 }
8975 pf->next_vsi = 0;
8976 pf->pdev = pdev;
8977 set_bit(__I40E_DOWN, &pf->state);
8978
8979 hw = &pf->hw;
8980 hw->back = pf;
8981 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8982 pci_resource_len(pdev, 0));
8983 if (!hw->hw_addr) {
8984 err = -EIO;
8985 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
8986 (unsigned int)pci_resource_start(pdev, 0),
8987 (unsigned int)pci_resource_len(pdev, 0), err);
8988 goto err_ioremap;
8989 }
8990 hw->vendor_id = pdev->vendor;
8991 hw->device_id = pdev->device;
8992 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
8993 hw->subsystem_vendor_id = pdev->subsystem_vendor;
8994 hw->subsystem_device_id = pdev->subsystem_device;
8995 hw->bus.device = PCI_SLOT(pdev->devfn);
8996 hw->bus.func = PCI_FUNC(pdev->devfn);
8997 pf->instance = pfs_found;
8998
8999 /* do a special CORER for clearing PXE mode once at init */
9000 if (hw->revision_id == 0 &&
9001 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9002 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9003 i40e_flush(hw);
9004 msleep(200);
9005 pf->corer_count++;
9006
9007 i40e_clear_pxe_mode(hw);
9008 }
9009
9010 /* Reset here to make sure all is clean and to define PF 'n' */
9011 i40e_clear_hw(hw);
9012 err = i40e_pf_reset(hw);
9013 if (err) {
9014 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9015 goto err_pf_reset;
9016 }
9017 pf->pfr_count++;
9018
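/* set the admin queue depths and buffer sizes used when the AQ is initialized below */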
9019 hw->aq.num_arq_entries = I40E_AQ_LEN;
9020 hw->aq.num_asq_entries = I40E_AQ_LEN;
9021 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9022 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9023 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9024 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
9025 "%s-pf%d:misc",
9026 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
9027
9028 err = i40e_init_shared_code(hw);
9029 if (err) {
9030 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9031 goto err_pf_reset;
9032 }
9033
9034 /* set up a default setting for link flow control */
9035 pf->hw.fc.requested_mode = I40E_FC_NONE;
9036
9037 err = i40e_init_adminq(hw);
9038 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9039 if (err) {
9040 dev_info(&pdev->dev,
9041 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9042 goto err_pf_reset;
9043 }
9044
9045 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9046 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9047 dev_info(&pdev->dev,
9048 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9049 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9050 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9051 dev_info(&pdev->dev,
9052 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9053
9054
9055 i40e_verify_eeprom(pf);
9056
9057 /* Rev 0 hardware was never productized */
9058 if (hw->revision_id < 1)
9059 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9060
9061 i40e_clear_pxe_mode(hw);
9062 err = i40e_get_capabilities(pf);
9063 if (err)
9064 goto err_adminq_setup;
9065
9066 err = i40e_sw_init(pf);
9067 if (err) {
9068 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9069 goto err_sw_init;
9070 }
9071
9072 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9073 hw->func_caps.num_rx_qp,
9074 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9075 if (err) {
9076 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9077 goto err_init_lan_hmc;
9078 }
9079
9080 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9081 if (err) {
9082 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9083 err = -ENOENT;
9084 goto err_configure_lan_hmc;
9085 }
9086
9087 i40e_get_mac_addr(hw, hw->mac.addr);
9088 if (!is_valid_ether_addr(hw->mac.addr)) {
9089 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9090 err = -EIO;
9091 goto err_mac_addr;
9092 }
9093 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9094 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
9095 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9096 if (is_valid_ether_addr(hw->mac.port_addr))
9097 pf->flags |= I40E_FLAG_PORT_ID_VALID;
9098 #ifdef I40E_FCOE
9099 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9100 if (err)
9101 dev_info(&pdev->dev,
9102 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9103 if (!is_valid_ether_addr(hw->mac.san_addr)) {
9104 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9105 hw->mac.san_addr);
9106 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9107 }
9108 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9109 #endif /* I40E_FCOE */
9110
9111 pci_set_drvdata(pdev, pf);
9112 pci_save_state(pdev);
9113 #ifdef CONFIG_I40E_DCB
9114 err = i40e_init_pf_dcb(pf);
9115 if (err) {
9116 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
9117 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9118 /* Continue without DCB enabled */
9119 }
9120 #endif /* CONFIG_I40E_DCB */
9121
9122 /* set up periodic task facility */
9123 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9124 pf->service_timer_period = HZ;
9125
9126 INIT_WORK(&pf->service_task, i40e_service_task);
9127 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9128 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9129 pf->link_check_timeout = jiffies;
9130
9131 /* WoL defaults to disabled */
9132 pf->wol_en = false;
9133 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9134
9135 /* set up the main switch operations */
9136 i40e_determine_queue_usage(pf);
9137 i40e_init_interrupt_scheme(pf);
9138
9139 /* The number of VSIs reported by the FW is the minimum guaranteed
9140 * to us; HW supports far more and we share the remaining pool with
9141 * the other PFs. We allocate space for more than the guarantee with
9142 * the understanding that we might not get them all later.
9143 */
9144 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9145 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9146 else
9147 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9148
9149 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9150 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
9151 pf->vsi = kzalloc(len, GFP_KERNEL);
9152 if (!pf->vsi) {
9153 err = -ENOMEM;
9154 goto err_switch_setup;
9155 }
9156
9157 err = i40e_setup_pf_switch(pf, false);
9158 if (err) {
9159 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9160 goto err_vsis;
9161 }
9162 /* if FDIR VSI was set up, start it now */
9163 for (i = 0; i < pf->num_alloc_vsi; i++) {
9164 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9165 i40e_vsi_open(pf->vsi[i]);
9166 break;
9167 }
9168 }
9169
9170 /* driver is only interested in link up/down and module qualification
9171 * reports from firmware
9172 */
9173 err = i40e_aq_set_phy_int_mask(&pf->hw,
9174 I40E_AQ_EVENT_LINK_UPDOWN |
9175 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9176 if (err)
9177 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9178
9179 /* The main driver is (mostly) up and happy. We need to set this state
9180 * before setting up the misc vector or we get a race and the vector
9181 * ends up disabled forever.
9182 */
9183 clear_bit(__I40E_DOWN, &pf->state);
9184
9185 /* In case of MSIX we are going to setup the misc vector right here
9186 * to handle admin queue events etc. In case of legacy and MSI
9187 * the misc functionality and queue processing is combined in
9188 * the same vector and that gets setup at open.
9189 */
9190 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9191 err = i40e_setup_misc_vector(pf);
9192 if (err) {
9193 dev_info(&pdev->dev,
9194 "setup of misc vector failed: %d\n", err);
9195 goto err_vsis;
9196 }
9197 }
9198
9199 #ifdef CONFIG_PCI_IOV
9200 /* prep for VF support */
9201 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9202 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9203 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9204 u32 val;
9205
9206 /* disable link interrupts for VFs */
9207 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9208 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9209 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9210 i40e_flush(hw);
9211
9212 if (pci_num_vf(pdev)) {
9213 dev_info(&pdev->dev,
9214 "Active VFs found, allocating resources.\n");
9215 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9216 if (err)
9217 dev_info(&pdev->dev,
9218 "Error %d allocating resources for existing VFs\n",
9219 err);
9220 }
9221 }
9222 #endif /* CONFIG_PCI_IOV */
9223
9224 pfs_found++;
9225
9226 i40e_dbg_pf_init(pf);
9227
9228 /* tell the firmware that we're starting */
9229 i40e_send_version(pf);
9230
9231 /* since everything's happy, start the service_task timer */
9232 mod_timer(&pf->service_timer,
9233 round_jiffies(jiffies + pf->service_timer_period));
9234
9235 #ifdef I40E_FCOE
9236 /* create FCoE interface */
9237 i40e_fcoe_vsi_setup(pf);
9238
9239 #endif
9240 /* Get the negotiated link width and speed from PCI config space */
9241 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9242
9243 i40e_set_pci_config_data(hw, link_status);
9244
9245 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
9246 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9247 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9248 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9249 "Unknown"),
9250 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9251 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9252 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9253 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9254 "Unknown"));
9255
9256 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9257 hw->bus.speed < i40e_bus_speed_8000) {
9258 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9259 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9260 }
9261
9262 /* print a string summarizing features */
9263 i40e_print_features(pf);
9264
9265 return 0;
9266
9267 /* Unwind what we've done if something failed in the setup */
9268 err_vsis:
9269 set_bit(__I40E_DOWN, &pf->state);
9270 i40e_clear_interrupt_scheme(pf);
9271 kfree(pf->vsi);
9272 err_switch_setup:
9273 i40e_reset_interrupt_capability(pf);
9274 del_timer_sync(&pf->service_timer);
9275 err_mac_addr:
9276 err_configure_lan_hmc:
9277 (void)i40e_shutdown_lan_hmc(hw);
9278 err_init_lan_hmc:
9279 kfree(pf->qp_pile);
9280 kfree(pf->irq_pile);
9281 err_sw_init:
9282 err_adminq_setup:
9283 (void)i40e_shutdown_adminq(hw);
9284 err_pf_reset:
9285 iounmap(hw->hw_addr);
9286 err_ioremap:
9287 kfree(pf);
9288 err_pf_alloc:
9289 pci_disable_pcie_error_reporting(pdev);
9290 pci_release_selected_regions(pdev,
9291 pci_select_bars(pdev, IORESOURCE_MEM));
9292 err_pci_reg:
9293 err_dma:
9294 pci_disable_device(pdev);
9295 return err;
9296 }
9297
9298 /**
9299 * i40e_remove - Device removal routine
9300 * @pdev: PCI device information struct
9301 *
9302 * i40e_remove is called by the PCI subsystem to alert the driver
9303 * that it should release a PCI device. This could be caused by a
9304 * Hot-Plug event, or because the driver is going to be removed from
9305 * memory.
9306 **/
9307 static void i40e_remove(struct pci_dev *pdev)
9308 {
9309 struct i40e_pf *pf = pci_get_drvdata(pdev);
9310 i40e_status ret_code;
9311 int i;
9312
9313 i40e_dbg_pf_exit(pf);
9314
9315 i40e_ptp_stop(pf);
9316
9317 /* no more scheduling of any task */
9318 set_bit(__I40E_DOWN, &pf->state);
9319 del_timer_sync(&pf->service_timer);
9320 cancel_work_sync(&pf->service_task);
9321
9322 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9323 i40e_free_vfs(pf);
9324 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
9325 }
9326
9327 i40e_fdir_teardown(pf);
9328
9329 /* If there is a switch structure or any orphans, remove them.
9330 * This will leave only the PF's VSI remaining.
9331 */
9332 for (i = 0; i < I40E_MAX_VEB; i++) {
9333 if (!pf->veb[i])
9334 continue;
9335
9336 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
9337 pf->veb[i]->uplink_seid == 0)
9338 i40e_switch_branch_release(pf->veb[i]);
9339 }
9340
9341 /* Now we can shut down the PF's VSI, just before we kill
9342 * adminq and hmc.
9343 */
9344 if (pf->vsi[pf->lan_vsi])
9345 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9346
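/* quiesce and free the misc (admin queue) vector before the interrupt scheme is cleared below */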
9347 i40e_stop_misc_vector(pf);
9348 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9349 synchronize_irq(pf->msix_entries[0].vector);
9350 free_irq(pf->msix_entries[0].vector, pf);
9351 }
9352
9353 /* shutdown and destroy the HMC */
9354 if (pf->hw.hmc.hmc_obj) {
9355 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
9356 if (ret_code)
9357 dev_warn(&pdev->dev,
9358 "Failed to destroy the HMC resources: %d\n",
9359 ret_code);
9360 }
9361
9362 /* shutdown the adminq */
9363 ret_code = i40e_shutdown_adminq(&pf->hw);
9364 if (ret_code)
9365 dev_warn(&pdev->dev,
9366 "Failed to destroy the Admin Queue resources: %d\n",
9367 ret_code);
9368
9369 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
9370 i40e_clear_interrupt_scheme(pf);
9371 for (i = 0; i < pf->num_alloc_vsi; i++) {
9372 if (pf->vsi[i]) {
9373 i40e_vsi_clear_rings(pf->vsi[i]);
9374 i40e_vsi_clear(pf->vsi[i]);
9375 pf->vsi[i] = NULL;
9376 }
9377 }
9378
9379 for (i = 0; i < I40E_MAX_VEB; i++) {
9380 kfree(pf->veb[i]);
9381 pf->veb[i] = NULL;
9382 }
9383
9384 kfree(pf->qp_pile);
9385 kfree(pf->irq_pile);
9386 kfree(pf->vsi);
9387
9388 iounmap(pf->hw.hw_addr);
9389 kfree(pf);
9390 pci_release_selected_regions(pdev,
9391 pci_select_bars(pdev, IORESOURCE_MEM));
9392
9393 pci_disable_pcie_error_reporting(pdev);
9394 pci_disable_device(pdev);
9395 }
9396
9397 /**
9398 * i40e_pci_error_detected - warning that something funky happened in PCI land
9399 * @pdev: PCI device information struct
9400 *
9401 * Called to warn that something happened and the error handling steps
9402 * are in progress. Allows the driver to quiesce things and be ready
9403 * for remediation.
9404 **/
9405 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
9406 enum pci_channel_state error)
9407 {
9408 struct i40e_pf *pf = pci_get_drvdata(pdev);
9409
9410 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
9411
9412 /* shutdown all operations */
9413 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
9414 rtnl_lock();
9415 i40e_prep_for_reset(pf);
9416 rtnl_unlock();
9417 }
9418
9419 /* Request a slot reset */
9420 return PCI_ERS_RESULT_NEED_RESET;
9421 }
9422
9423 /**
9424 * i40e_pci_error_slot_reset - a PCI slot reset just happened
9425 * @pdev: PCI device information struct
9426 *
9427 * Called to find if the driver can work with the device now that
9428 * the pci slot has been reset. If a basic connection seems good
9429 * (registers are readable and have sane content) then return a
9430 * happy little PCI_ERS_RESULT_xxx.
9431 **/
9432 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
9433 {
9434 struct i40e_pf *pf = pci_get_drvdata(pdev);
9435 pci_ers_result_t result;
9436 int err;
9437 u32 reg;
9438
9439 dev_info(&pdev->dev, "%s\n", __func__);
9440 if (pci_enable_device_mem(pdev)) {
9441 dev_info(&pdev->dev,
9442 "Cannot re-enable PCI device after reset.\n");
9443 result = PCI_ERS_RESULT_DISCONNECT;
9444 } else {
9445 pci_set_master(pdev);
9446 pci_restore_state(pdev);
9447 pci_save_state(pdev);
9448 pci_wake_from_d3(pdev, false);
9449
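/* a zero read from the global reset trigger register indicates the device came back in a usable state */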
9450 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9451 if (reg == 0)
9452 result = PCI_ERS_RESULT_RECOVERED;
9453 else
9454 result = PCI_ERS_RESULT_DISCONNECT;
9455 }
9456
9457 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9458 if (err) {
9459 dev_info(&pdev->dev,
9460 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9461 err);
9462 /* non-fatal, continue */
9463 }
9464
9465 return result;
9466 }
9467
9468 /**
9469 * i40e_pci_error_resume - restart operations after PCI error recovery
9470 * @pdev: PCI device information struct
9471 *
9472 * Called to allow the driver to bring things back up after PCI error
9473 * and/or reset recovery has finished.
9474 **/
9475 static void i40e_pci_error_resume(struct pci_dev *pdev)
9476 {
9477 struct i40e_pf *pf = pci_get_drvdata(pdev);
9478
9479 dev_info(&pdev->dev, "%s\n", __func__);
9480 if (test_bit(__I40E_SUSPENDED, &pf->state))
9481 return;
9482
9483 rtnl_lock();
9484 i40e_handle_reset_warning(pf);
9485 rtnl_unlock();
9486 }
9487
9488 /**
9489 * i40e_shutdown - PCI callback for shutting down
9490 * @pdev: PCI device information struct
9491 **/
9492 static void i40e_shutdown(struct pci_dev *pdev)
9493 {
9494 struct i40e_pf *pf = pci_get_drvdata(pdev);
9495 struct i40e_hw *hw = &pf->hw;
9496
9497 set_bit(__I40E_SUSPENDED, &pf->state);
9498 set_bit(__I40E_DOWN, &pf->state);
9499 rtnl_lock();
9500 i40e_prep_for_reset(pf);
9501 rtnl_unlock();
9502
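/* program wake-on-LAN behavior: enable APM and magic packet wake-up only if WoL is requested */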
9503 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9504 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9505
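/* arm wake from D3 and drop to D3hot only when the system is actually powering off */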
9506 if (system_state == SYSTEM_POWER_OFF) {
9507 pci_wake_from_d3(pdev, pf->wol_en);
9508 pci_set_power_state(pdev, PCI_D3hot);
9509 }
9510 }
9511
9512 #ifdef CONFIG_PM
9513 /**
9514 * i40e_suspend - PCI callback for moving to D3
9515 * @pdev: PCI device information struct
9516 **/
9517 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9518 {
9519 struct i40e_pf *pf = pci_get_drvdata(pdev);
9520 struct i40e_hw *hw = &pf->hw;
9521
9522 set_bit(__I40E_SUSPENDED, &pf->state);
9523 set_bit(__I40E_DOWN, &pf->state);
9524 rtnl_lock();
9525 i40e_prep_for_reset(pf);
9526 rtnl_unlock();
9527
9528 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9529 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9530
9531 pci_wake_from_d3(pdev, pf->wol_en);
9532 pci_set_power_state(pdev, PCI_D3hot);
9533
9534 return 0;
9535 }
9536
9537 /**
9538 * i40e_resume - PCI callback for waking up from D3
9539 * @pdev: PCI device information struct
9540 **/
9541 static int i40e_resume(struct pci_dev *pdev)
9542 {
9543 struct i40e_pf *pf = pci_get_drvdata(pdev);
9544 int err;
9545
9546 pci_set_power_state(pdev, PCI_D0);
9547 pci_restore_state(pdev);
9548 /* pci_restore_state() clears dev->state_saved, so
9549 * call pci_save_state() again to restore it.
9550 */
9551 pci_save_state(pdev);
9552
9553 err = pci_enable_device_mem(pdev);
9554 if (err) {
9555 dev_err(&pdev->dev,
9556 "%s: Cannot enable PCI device from suspend\n",
9557 __func__);
9558 return err;
9559 }
9560 pci_set_master(pdev);
9561
9562 /* no wakeup events while running */
9563 pci_wake_from_d3(pdev, false);
9564
9565 /* handling the reset will rebuild the device state */
9566 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
9567 clear_bit(__I40E_DOWN, &pf->state);
9568 rtnl_lock();
9569 i40e_reset_and_rebuild(pf, false);
9570 rtnl_unlock();
9571 }
9572
9573 return 0;
9574 }
9575
9576 #endif /* CONFIG_PM */
9577 static const struct pci_error_handlers i40e_err_handler = {
9578 .error_detected = i40e_pci_error_detected,
9579 .slot_reset = i40e_pci_error_slot_reset,
9580 .resume = i40e_pci_error_resume,
9581 };
9582
9583 static struct pci_driver i40e_driver = {
9584 .name = i40e_driver_name,
9585 .id_table = i40e_pci_tbl,
9586 .probe = i40e_probe,
9587 .remove = i40e_remove,
9588 #ifdef CONFIG_PM
9589 .suspend = i40e_suspend,
9590 .resume = i40e_resume,
9591 #endif
9592 .shutdown = i40e_shutdown,
9593 .err_handler = &i40e_err_handler,
9594 .sriov_configure = i40e_pci_sriov_configure,
9595 };
9596
9597 /**
9598 * i40e_init_module - Driver registration routine
9599 *
9600 * i40e_init_module is the first routine called when the driver is
9601 * loaded. All it does is register with the PCI subsystem.
9602 **/
9603 static int __init i40e_init_module(void)
9604 {
9605 pr_info("%s: %s - version %s\n", i40e_driver_name,
9606 i40e_driver_string, i40e_driver_version_str);
9607 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
9608 i40e_dbg_init();
9609 return pci_register_driver(&i40e_driver);
9610 }
9611 module_init(i40e_init_module);
9612
9613 /**
9614 * i40e_exit_module - Driver exit cleanup routine
9615 *
9616 * i40e_exit_module is called just before the driver is removed
9617 * from memory.
9618 **/
9619 static void __exit i40e_exit_module(void)
9620 {
9621 pci_unregister_driver(&i40e_driver);
9622 i40e_dbg_exit();
9623 }
9624 module_exit(i40e_exit_module);