i40e: rework fdir setup and teardown
drivers/net/ethernet/intel/i40e/i40e_main.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 41
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
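
/* Usage note (illustrative only): the "debug" parameter above feeds the
 * driver's message level, so loading the module with maximum verbosity
 * would look like
 *
 *	modprobe i40e debug=16
 *
 * while the default of -1 is assumed here to leave the kernel's usual
 * netif message level in place.
 */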

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
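
/* A minimal sketch of how a shared-code caller is expected to use the
 * pair of hooks above (hypothetical caller, illustration only):
 *
 *	struct i40e_dma_mem ring_mem;
 *	int err;
 *
 *	err = i40e_allocate_dma_mem_d(hw, &ring_mem, 4096, 128);
 *	if (err)
 *		return err;
 *	// ... program ring_mem.pa into the hardware, use ring_mem.va ...
 *	i40e_free_dma_mem_d(hw, &ring_mem);
 *
 * Note that the allocation is rounded up by ALIGN(), so mem->size may be
 * larger than the size requested.
 */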

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
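
/* Worked example of the lump allocator (hypothetical values, illustration
 * only): with a fresh pile of 8 entries and search_hint = 0,
 *
 *	base = i40e_get_lump(pf, pile, 4, 0x10);
 *
 * returns base = 0 and marks entries 0..3 with (0x10 | I40E_PILE_VALID_BIT),
 * leaving search_hint at 4.  A later
 *
 *	count = i40e_put_lump(pile, 0, 0x10);
 *
 * clears those same 4 entries, returns count = 4, and pulls search_hint
 * back to 0 so the freed space is found again by the next caller.
 */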

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN, &vsi->state);
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
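
/* A note on the ring layout assumed above: the Tx and Rx rings for a
 * queue pair are allocated back to back, which is why "&tx_ring[1]"
 * reaches the companion Rx ring.  The begin/retry loop is the standard
 * u64_stats pattern for tearing-free 64-bit reads on 32-bit hosts; a
 * sketch of the same idiom for a single ring (illustration only):
 *
 *	unsigned int start;
 *	u64 packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		packets = ring->stats.packets;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */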

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
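
/* Worked example of the 48-bit roll-over handling above (hypothetical
 * register values, illustration only): if the saved offset is
 * 0xFFFFFFFFFF00 and the counter has since wrapped to new_data = 0x0100,
 * then new_data < *offset, so the stat becomes
 *
 *	(0x0100 + (1ULL << 48)) - 0xFFFFFFFFFF00 = 0x200
 *
 * i.e. 0x100 counts up to the wrap point plus 0x100 after it, and the
 * final mask keeps the result within 48 bits.
 */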

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u32 val;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors + nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);

		val = rd32(hw, I40E_PRTPM_EEE_STAT);
		nsd->tx_lpi_status =
			       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			       I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
		nsd->rx_lpi_status =
			       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			       I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
		i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
				   pf->stat_offsets_loaded,
				   &osd->tx_lpi_count, &nsd->tx_lpi_count);
		i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
				   pf->stat_offsets_loaded,
				   &osd->rx_lpi_count, &nsd->rx_lpi_count);
	}

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only a vlan of -1 on every filter denotes "not in vlan mode",
	 * so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
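
/* A sketch of the reference counting done by i40e_add_filter() and
 * i40e_del_filter() above (hypothetical "mac" variable, illustration
 * only): adding the same MAC/VLAN once for the netdev and once for a VF
 * keeps a single list entry whose counter reaches 2,
 *
 *	i40e_add_filter(vsi, mac, 0, false, true);   // netdev user
 *	i40e_add_filter(vsi, mac, 0, true, false);   // VF user
 *
 * and the entry is only flagged for removal from the firmware once the
 * matching i40e_del_filter() calls bring the counter back to zero.
 */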

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
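
/* Ordering note for the address change above: the new unicast address is
 * added and synced before the old one is deleted, so there is no window
 * in which neither filter is present and packets could be dropped.  A
 * condensed sketch of that sequence (hypothetical new_mac/old_mac names,
 * illustration only):
 *
 *	i40e_add_filter(vsi, new_mac, I40E_VLAN_ANY, false, false);
 *	i40e_sync_vsi_filters(vsi);
 *	i40e_del_filter(vsi, old_mac, I40E_VLAN_ANY, false, false);
 *	i40e_sync_vsi_filters(vsi);
 */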

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
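
/* Worked example of the qmap encoding above (hypothetical numbers,
 * illustration only): a TC at queue offset 8 with qcount = 4 yields
 * pow = 2 (the power-of-2 exponent covering 4 queues), so
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * packs "start at queue 8, use 2^2 queues" into one 16-bit TC mapping
 * entry for the firmware.
 */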

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del, NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}
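
/* Two design notes on the sync above (summarizing the code, not adding
 * behavior): deletes and adds are batched into AdminQ buffers sized by
 * asq_buf_size and flushed whenever a buffer fills, so one pass over the
 * filter list issues as few firmware commands as possible; and if an add
 * fails with I40E_AQ_RC_ENOSPC, the sync does not fail but forces
 * promiscuous mode on instead, so the netdev keeps receiving traffic
 * even though the hardware filter table is full.
 */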

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

1738/**
1739 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1740 * @vsi: the vsi being adjusted
1741 **/
1742void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1743{
1744 struct i40e_vsi_context ctxt;
1745 i40e_status ret;
1746
1747 if ((vsi->info.valid_sections &
1748 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1749 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1750 return; /* already enabled */
1751
1752 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1753 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1754 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1755
1756 ctxt.seid = vsi->seid;
1757 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1758 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1759 if (ret) {
1760 dev_info(&vsi->back->pdev->dev,
1761 "%s: update vsi failed, aq_err=%d\n",
1762 __func__, vsi->back->hw.aq.asq_last_status);
1763 }
1764}
1765
1766/**
1767 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1768 * @vsi: the vsi being adjusted
1769 **/
1770void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1771{
1772 struct i40e_vsi_context ctxt;
1773 i40e_status ret;
1774
1775 if ((vsi->info.valid_sections &
1776 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1777 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1778 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1779 return; /* already disabled */
1780
1781 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1782 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1783 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1784
1785 ctxt.seid = vsi->seid;
1786 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1787 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1788 if (ret) {
1789 dev_info(&vsi->back->pdev->dev,
1790 "%s: update vsi failed, aq_err=%d\n",
1791 __func__, vsi->back->hw.aq.asq_last_status);
1792 }
1793}
1794
1795/**
1796 * i40e_vlan_rx_register - Setup or shutdown vlan offload
1797 * @netdev: network interface to be adjusted
1798 * @features: netdev features to test if VLAN offload is enabled or not
1799 **/
1800static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1801{
1802 struct i40e_netdev_priv *np = netdev_priv(netdev);
1803 struct i40e_vsi *vsi = np->vsi;
1804
1805 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1806 i40e_vlan_stripping_enable(vsi);
1807 else
1808 i40e_vlan_stripping_disable(vsi);
1809}
1810
1811/**
1812 * i40e_vsi_add_vlan - Add vsi membership for given vlan
1813 * @vsi: the vsi being configured
1814 * @vid: vlan id to be added (0 = untagged only, -1 = any)
1815 **/
1816int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1817{
1818 struct i40e_mac_filter *f, *add_f;
1819 bool is_netdev, is_vf;
1820
1821 is_vf = (vsi->type == I40E_VSI_SRIOV);
1822 is_netdev = !!(vsi->netdev);
1823
1824 if (is_netdev) {
1825 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
1826 is_vf, is_netdev);
1827 if (!add_f) {
1828 dev_info(&vsi->back->pdev->dev,
1829 "Could not add vlan filter %d for %pM\n",
1830 vid, vsi->netdev->dev_addr);
1831 return -ENOMEM;
1832 }
1833 }
1834
1835 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1836 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1837 if (!add_f) {
1838 dev_info(&vsi->back->pdev->dev,
1839 "Could not add vlan filter %d for %pM\n",
1840 vid, f->macaddr);
1841 return -ENOMEM;
1842 }
1843 }
1844
1845 /* Now if we add a vlan tag, make sure to check if it is the first
1846 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
1847 * with 0, so we now accept untagged and specified tagged traffic
1848 * (and not any tagged and untagged)
1849 */
1850 if (vid > 0) {
1851 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
1852 I40E_VLAN_ANY,
1853 is_vf, is_netdev)) {
1854 i40e_del_filter(vsi, vsi->netdev->dev_addr,
1855 I40E_VLAN_ANY, is_vf, is_netdev);
1856 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
1857 is_vf, is_netdev);
1858 if (!add_f) {
1859 dev_info(&vsi->back->pdev->dev,
1860 "Could not add filter 0 for %pM\n",
1861 vsi->netdev->dev_addr);
1862 return -ENOMEM;
1863 }
1864 }
1865 }
1866
1867 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
1868 if (vid > 0 && !vsi->info.pvid) {
1869 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1870 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1871 is_vf, is_netdev)) {
1872 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1873 is_vf, is_netdev);
1874 add_f = i40e_add_filter(vsi, f->macaddr,
1875 0, is_vf, is_netdev);
1876 if (!add_f) {
1877 dev_info(&vsi->back->pdev->dev,
1878 "Could not add filter 0 for %pM\n",
1879 f->macaddr);
1880 return -ENOMEM;
1881 }
1882 }
1883 }
1884 }
1885
1886 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1887 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1888 return 0;
1889
1890 return i40e_sync_vsi_filters(vsi);
1891}
1892
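/* [Editor's sketch] The vid argument to i40e_vsi_add_vlan() encodes three
 * cases, mirroring the filter logic above: I40E_VLAN_ANY (-1) matches any
 * tag or none, 0 matches untagged frames only, and 1..4095 matches that
 * specific tag.  Adding the first real vid swaps the catch-all -1 filters
 * for vid-0 filters so the VSI stops accepting arbitrary tags, e.g.:
 *
 *	if (i40e_vsi_add_vlan(vsi, 100))	// untagged + vid 100 only
 *		dev_err(&vsi->back->pdev->dev, "vlan add failed\n");
 */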
1893/**
1894 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1895 * @vsi: the vsi being configured
1896 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1897 *
1898 * Return: 0 on success or negative otherwise
1899 **/
1900int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1901{
1902 struct net_device *netdev = vsi->netdev;
1903 struct i40e_mac_filter *f, *add_f;
1904 bool is_vf, is_netdev;
1905 int filter_count = 0;
1906
1907 is_vf = (vsi->type == I40E_VSI_SRIOV);
1908 is_netdev = !!(netdev);
1909
1910 if (is_netdev)
1911 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1912
1913 list_for_each_entry(f, &vsi->mac_filter_list, list)
1914 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1915
1916 /* go through all the filters for this VSI and if there is only
1917 * vid == 0 it means there are no other filters, so vid 0 must
1918 * be replaced with -1. This signifies that we should from now
1919 * on accept any traffic (with any tag present, or untagged)
1920 */
1921 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1922 if (is_netdev) {
1923 if (f->vlan &&
1924 ether_addr_equal(netdev->dev_addr, f->macaddr))
1925 filter_count++;
1926 }
1927
1928 if (f->vlan)
1929 filter_count++;
1930 }
1931
1932 if (!filter_count && is_netdev) {
1933 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1934 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1935 is_vf, is_netdev);
1936 if (!f) {
1937 dev_info(&vsi->back->pdev->dev,
1938 "Could not add filter %d for %pM\n",
1939 I40E_VLAN_ANY, netdev->dev_addr);
1940 return -ENOMEM;
1941 }
1942 }
1943
1944 if (!filter_count) {
1945 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1946 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1947 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1948 is_vf, is_netdev);
1949 if (!add_f) {
1950 dev_info(&vsi->back->pdev->dev,
1951 "Could not add filter %d for %pM\n",
1952 I40E_VLAN_ANY, f->macaddr);
1953 return -ENOMEM;
1954 }
1955 }
1956 }
1957
1958 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1959 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1960 return 0;
1961
1962 return i40e_sync_vsi_filters(vsi);
1963}
1964
1965/**
1966 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1967 * @netdev: network interface to be adjusted
1968 * @vid: vlan id to be added
1969 *
1970 * net_device_ops implementation for adding vlan ids
1971 **/
1972static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1973 __always_unused __be16 proto, u16 vid)
1974{
1975 struct i40e_netdev_priv *np = netdev_priv(netdev);
1976 struct i40e_vsi *vsi = np->vsi;
1977 int ret = 0;
1978
1979 if (vid > 4095)
1980 return -EINVAL;
1981
1982 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1983
1984 /* If the network stack called us with vid = 0 then
1985 * it is asking to receive priority tagged packets with
1986 * vlan id 0. Our HW receives them by default when configured
1987 * to receive untagged packets so there is no need to add an
1988 * extra filter for vlan 0 tagged packets.
1989 */
1990 if (vid)
1991 ret = i40e_vsi_add_vlan(vsi, vid);
1992
1993 if (!ret && (vid < VLAN_N_VID))
1994 set_bit(vid, vsi->active_vlans);
1995
1996 return ret;
1997}
1998
1999/**
2000 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2001 * @netdev: network interface to be adjusted
2002 * @vid: vlan id to be removed
2003 *
2004 * net_device_ops implementation for removing vlan ids
2005 **/
2006static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2007 __always_unused __be16 proto, u16 vid)
2008{
2009 struct i40e_netdev_priv *np = netdev_priv(netdev);
2010 struct i40e_vsi *vsi = np->vsi;
2011
2012 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2013
2014 /* return code is ignored as there is nothing a user
2015 * can do about failure to remove and a log message was
2016 * already printed from the other function
2017 */
2018 i40e_vsi_kill_vlan(vsi, vid);
2019
2020 clear_bit(vid, vsi->active_vlans);
2021
2022 return 0;
2023}
2024
2025/**
2026 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2027 * @vsi: the vsi being brought back up
2028 **/
2029static void i40e_restore_vlan(struct i40e_vsi *vsi)
2030{
2031 u16 vid;
2032
2033 if (!vsi->netdev)
2034 return;
2035
2036 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2037
2038 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2039 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2040 vid);
2041}
2042
2043/**
2044 * i40e_vsi_add_pvid - Add pvid for the VSI
2045 * @vsi: the vsi being adjusted
2046 * @vid: the vlan id to set as a PVID
2047 **/
2048 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2049 {
2050 struct i40e_vsi_context ctxt;
2051 i40e_status aq_ret;
2052
2053 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2054 vsi->info.pvid = cpu_to_le16(vid);
2055 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2056 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2057 I40E_AQ_VSI_PVLAN_EMOD_STR;
2058
2059 ctxt.seid = vsi->seid;
2060 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2061 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2062 if (aq_ret) {
2063 dev_info(&vsi->back->pdev->dev,
2064 "%s: update vsi failed, aq_err=%d\n",
2065 __func__, vsi->back->hw.aq.asq_last_status);
2066 return -ENOENT;
2067 }
2068
2069 return 0;
2070}
2071
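/* [Editor's sketch] A port VLAN pins the whole VSI to one tag: the flags
 * above request PVID insertion on transmit and stripping on receive.  A
 * hypothetical caller assigning a VF's VSI to VLAN 42 would do:
 *
 *	if (i40e_vsi_add_pvid(vsi, 42))
 *		dev_err(&vsi->back->pdev->dev, "set pvid failed\n");
 *	...
 *	i40e_vsi_remove_pvid(vsi);	// back to normal tagging
 */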
2072/**
2073 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2074 * @vsi: the vsi being adjusted
2075 *
2076 * Just use the vlan_rx_register() service to put it back to normal
2077 **/
2078void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2079{
2080 i40e_vlan_stripping_disable(vsi);
2081
2082 vsi->info.pvid = 0;
2083}
2084
2085/**
2086 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2087 * @vsi: ptr to the VSI
2088 *
2089 * If this function returns with an error, then it's possible one or
2090 * more of the rings is populated (while the rest are not). It is the
2091 * caller's duty to clean those orphaned rings.
2092 *
2093 * Return 0 on success, negative on failure
2094 **/
2095static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2096{
2097 int i, err = 0;
2098
2099 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2100 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2101
2102 return err;
2103}
2104
2105/**
2106 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2107 * @vsi: ptr to the VSI
2108 *
2109 * Free VSI's transmit software resources
2110 **/
2111static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2112{
2113 int i;
2114
2115 if (!vsi->tx_rings)
2116 return;
2117
2118 for (i = 0; i < vsi->num_queue_pairs; i++)
2119 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2120 i40e_free_tx_resources(vsi->tx_rings[i]);
2121}
2122
2123/**
2124 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2125 * @vsi: ptr to the VSI
2126 *
2127 * If this function returns with an error, then it's possible one or
2128 * more of the rings is populated (while the rest are not). It is the
2129 * caller's duty to clean those orphaned rings.
2130 *
2131 * Return 0 on success, negative on failure
2132 **/
2133static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2134{
2135 int i, err = 0;
2136
2137 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2138 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2139 return err;
2140}
2141
2142/**
2143 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2144 * @vsi: ptr to the VSI
2145 *
2146 * Free all receive software resources
2147 **/
2148static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2149{
2150 int i;
2151
2152 if (!vsi->rx_rings)
2153 return;
2154
2155 for (i = 0; i < vsi->num_queue_pairs; i++)
2156 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2157 i40e_free_rx_resources(vsi->rx_rings[i]);
2158}
2159
2160/**
2161 * i40e_configure_tx_ring - Configure a transmit ring context
2162 * @ring: The Tx ring to configure
2163 *
2164 * Configure the Tx descriptor ring in the HMC context.
2165 **/
2166static int i40e_configure_tx_ring(struct i40e_ring *ring)
2167{
2168 struct i40e_vsi *vsi = ring->vsi;
2169 u16 pf_q = vsi->base_queue + ring->queue_index;
2170 struct i40e_hw *hw = &vsi->back->hw;
2171 struct i40e_hmc_obj_txq tx_ctx;
2172 i40e_status err = 0;
2173 u32 qtx_ctl = 0;
2174
2175 /* some ATR related tx ring init */
2176 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2177 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2178 ring->atr_count = 0;
2179 } else {
2180 ring->atr_sample_rate = 0;
2181 }
2182
2183 /* initialize XPS */
2184 if (ring->q_vector && ring->netdev &&
2185 vsi->tc_config.numtc <= 1 &&
2186 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2187 netif_set_xps_queue(ring->netdev,
2188 &ring->q_vector->affinity_mask,
2189 ring->queue_index);
2190
2191 /* clear the context structure first */
2192 memset(&tx_ctx, 0, sizeof(tx_ctx));
2193
2194 tx_ctx.new_context = 1;
2195 tx_ctx.base = (ring->dma / 128);
2196 tx_ctx.qlen = ring->count;
2197 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2198 I40E_FLAG_FD_ATR_ENABLED));
2199 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2200 /* FDIR VSI tx ring can still use RS bit and writebacks */
2201 if (vsi->type != I40E_VSI_FDIR)
2202 tx_ctx.head_wb_ena = 1;
2203 tx_ctx.head_wb_addr = ring->dma +
2204 (ring->count * sizeof(struct i40e_tx_desc));
2205
2206 /* As part of VSI creation/update, FW allocates certain
2207 * Tx arbitration queue sets for each TC enabled for
2208 * the VSI. The FW returns the handles to these queue
2209 * sets as part of the response buffer to Add VSI,
2210 * Update VSI, etc. AQ commands. It is expected that
2211 * these queue set handles be associated with the Tx
2212 * queues by the driver as part of the TX queue context
2213 * initialization. This has to be done regardless of
2214 * DCB as by default everything is mapped to TC0.
2215 */
2216 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2217 tx_ctx.rdylist_act = 0;
2218
2219 /* clear the context in the HMC */
2220 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2221 if (err) {
2222 dev_info(&vsi->back->pdev->dev,
2223 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2224 ring->queue_index, pf_q, err);
2225 return -ENOMEM;
2226 }
2227
2228 /* set the context in the HMC */
2229 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2230 if (err) {
2231 dev_info(&vsi->back->pdev->dev,
2232 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2233 ring->queue_index, pf_q, err);
2234 return -ENOMEM;
2235 }
2236
2237 /* Now associate this queue with this PCI function */
2238 if (vsi->type == I40E_VSI_VMDQ2)
2239 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2240 else
2241 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2242 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2243 I40E_QTX_CTL_PF_INDX_MASK);
2244 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2245 i40e_flush(hw);
2246
2247 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2248
2249 /* cache tail off for easier writes later */
2250 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2251
2252 return 0;
2253}
2254
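/* [Editor's sketch, hypothetical helper] How the QTX_CTL value written
 * above is composed for a PF-owned queue: the owner type is ORed with the
 * masked PF index (e.g. pf_id 3 lands in the PF_INDX field).
 */
static u32 i40e_qtx_ctl_pf_sketch(struct i40e_hw *hw)
{
	return I40E_QTX_CTL_PF_QUEUE |
	       ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		I40E_QTX_CTL_PF_INDX_MASK);
}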
2255/**
2256 * i40e_configure_rx_ring - Configure a receive ring context
2257 * @ring: The Rx ring to configure
2258 *
2259 * Configure the Rx descriptor ring in the HMC context.
2260 **/
2261static int i40e_configure_rx_ring(struct i40e_ring *ring)
2262{
2263 struct i40e_vsi *vsi = ring->vsi;
2264 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2265 u16 pf_q = vsi->base_queue + ring->queue_index;
2266 struct i40e_hw *hw = &vsi->back->hw;
2267 struct i40e_hmc_obj_rxq rx_ctx;
2268 i40e_status err = 0;
2269
2270 ring->state = 0;
2271
2272 /* clear the context structure first */
2273 memset(&rx_ctx, 0, sizeof(rx_ctx));
2274
2275 ring->rx_buf_len = vsi->rx_buf_len;
2276 ring->rx_hdr_len = vsi->rx_hdr_len;
2277
2278 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2279 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2280
2281 rx_ctx.base = (ring->dma / 128);
2282 rx_ctx.qlen = ring->count;
2283
2284 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2285 set_ring_16byte_desc_enabled(ring);
2286 rx_ctx.dsize = 0;
2287 } else {
2288 rx_ctx.dsize = 1;
2289 }
2290
2291 rx_ctx.dtype = vsi->dtype;
2292 if (vsi->dtype) {
2293 set_ring_ps_enabled(ring);
2294 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2295 I40E_RX_SPLIT_IP |
2296 I40E_RX_SPLIT_TCP_UDP |
2297 I40E_RX_SPLIT_SCTP;
2298 } else {
2299 rx_ctx.hsplit_0 = 0;
2300 }
2301
2302 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2303 (chain_len * ring->rx_buf_len));
2304 rx_ctx.tphrdesc_ena = 1;
2305 rx_ctx.tphwdesc_ena = 1;
2306 rx_ctx.tphdata_ena = 1;
2307 rx_ctx.tphhead_ena = 1;
2308 if (hw->revision_id == 0)
2309 rx_ctx.lrxqthresh = 0;
2310 else
2311 rx_ctx.lrxqthresh = 2;
2312 rx_ctx.crcstrip = 1;
2313 rx_ctx.l2tsel = 1;
2314 rx_ctx.showiv = 1;
2315 /* set the prefena field to 1 because the manual says to */
2316 rx_ctx.prefena = 1;
2317
2318 /* clear the context in the HMC */
2319 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2320 if (err) {
2321 dev_info(&vsi->back->pdev->dev,
2322 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2323 ring->queue_index, pf_q, err);
2324 return -ENOMEM;
2325 }
2326
2327 /* set the context in the HMC */
2328 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2329 if (err) {
2330 dev_info(&vsi->back->pdev->dev,
2331 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2332 ring->queue_index, pf_q, err);
2333 return -ENOMEM;
2334 }
2335
2336 /* cache tail for quicker writes, and clear the reg before use */
2337 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2338 writel(0, ring->tail);
2339
2340 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2341
2342 return 0;
2343}
2344
2345/**
2346 * i40e_vsi_configure_tx - Configure the VSI for Tx
2347 * @vsi: VSI structure describing this set of rings and resources
2348 *
2349 * Configure the Tx VSI for operation.
2350 **/
2351static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2352{
2353 int err = 0;
2354 u16 i;
2355
2356 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2357 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2358
2359 return err;
2360}
2361
2362/**
2363 * i40e_vsi_configure_rx - Configure the VSI for Rx
2364 * @vsi: the VSI being configured
2365 *
2366 * Configure the Rx VSI for operation.
2367 **/
2368static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2369{
2370 int err = 0;
2371 u16 i;
2372
2373 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2374 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2375 + ETH_FCS_LEN + VLAN_HLEN;
2376 else
2377 vsi->max_frame = I40E_RXBUFFER_2048;
2378
2379 /* figure out correct receive buffer length */
2380 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2381 I40E_FLAG_RX_PS_ENABLED)) {
2382 case I40E_FLAG_RX_1BUF_ENABLED:
2383 vsi->rx_hdr_len = 0;
2384 vsi->rx_buf_len = vsi->max_frame;
2385 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2386 break;
2387 case I40E_FLAG_RX_PS_ENABLED:
2388 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2389 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2390 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2391 break;
2392 default:
2393 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2394 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2395 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2396 break;
2397 }
2398
2399 /* round up for the chip's needs */
2400 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2401 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2402 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2403 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2404
2405 /* set up individual rings */
2406 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2407 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2408
2409 return err;
2410}
2411
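/* [Editor's sketch] The ALIGN() calls above round the header and data
 * buffer sizes up to the power-of-two granularity implied by the
 * HBUFF/DBUFF shifts, since the HMC context stores them as shifted
 * values.  Illustration only (the shift of 7 is an assumption here):
 *
 *	ALIGN(1518, 1 << 7) == 1536	// next 128-byte multiple
 */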
2412/**
2413 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2414 * @vsi: ptr to the VSI
2415 **/
2416static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2417{
2418 u16 qoffset, qcount;
2419 int i, n;
2420
2421 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2422 return;
2423
2424 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2425 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2426 continue;
2427
2428 qoffset = vsi->tc_config.tc_info[n].qoffset;
2429 qcount = vsi->tc_config.tc_info[n].qcount;
2430 for (i = qoffset; i < (qoffset + qcount); i++) {
2431 struct i40e_ring *rx_ring = vsi->rx_rings[i];
2432 struct i40e_ring *tx_ring = vsi->tx_rings[i];
2433 rx_ring->dcb_tc = n;
2434 tx_ring->dcb_tc = n;
2435 }
2436 }
2437}
2438
2439/**
2440 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2441 * @vsi: ptr to the VSI
2442 **/
2443static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2444{
2445 if (vsi->netdev)
2446 i40e_set_rx_mode(vsi->netdev);
2447}
2448
2449/**
2450 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2451 * @vsi: Pointer to the targeted VSI
2452 *
2453 * This function replays the hlist on the hw where all the SB Flow Director
2454 * filters were saved.
2455 **/
2456static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2457{
2458 struct i40e_fdir_filter *filter;
2459 struct i40e_pf *pf = vsi->back;
2460 struct hlist_node *node;
2461
2462 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2463 return;
2464
2465 hlist_for_each_entry_safe(filter, node,
2466 &pf->fdir_filter_list, fdir_node) {
2467 i40e_add_del_fdir(vsi, filter, true);
2468 }
2469}
2470
2471/**
2472 * i40e_vsi_configure - Set up the VSI for action
2473 * @vsi: the VSI being configured
2474 **/
2475static int i40e_vsi_configure(struct i40e_vsi *vsi)
2476{
2477 int err;
2478
2479 i40e_set_vsi_rx_mode(vsi);
2480 i40e_restore_vlan(vsi);
2481 i40e_vsi_config_dcb_rings(vsi);
2482 err = i40e_vsi_configure_tx(vsi);
2483 if (!err)
2484 err = i40e_vsi_configure_rx(vsi);
2485
2486 return err;
2487}
2488
2489/**
2490 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2491 * @vsi: the VSI being configured
2492 **/
2493static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2494{
2495 struct i40e_pf *pf = vsi->back;
2496 struct i40e_q_vector *q_vector;
2497 struct i40e_hw *hw = &pf->hw;
2498 u16 vector;
2499 int i, q;
2500 u32 val;
2501 u32 qp;
2502
2503 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2504 * and PFINT_LNKLSTn registers, e.g.:
2505 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2506 */
2507 qp = vsi->base_queue;
2508 vector = vsi->base_vector;
2509 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2510 q_vector = vsi->q_vectors[i];
2511 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2512 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2513 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2514 q_vector->rx.itr);
2515 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2516 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2517 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2518 q_vector->tx.itr);
2519
2520 /* Linked list for the queuepairs assigned to this vector */
2521 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2522 for (q = 0; q < q_vector->num_ringpairs; q++) {
2523 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2524 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2525 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2526 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2527 (I40E_QUEUE_TYPE_TX
2528 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2529
2530 wr32(hw, I40E_QINT_RQCTL(qp), val);
2531
2532 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2533 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2534 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2535 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2536 (I40E_QUEUE_TYPE_RX
2537 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2538
2539 /* Terminate the linked list */
2540 if (q == (q_vector->num_ringpairs - 1))
2541 val |= (I40E_QUEUE_END_OF_LIST
2542 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2543
2544 wr32(hw, I40E_QINT_TQCTL(qp), val);
2545 qp++;
2546 }
2547 }
2548
2549 i40e_flush(hw);
2550}
2551
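/* [Editor's sketch] The "vector - 1" indexing above reflects the register
 * layout noted at the top of the function: MSI-X vector 0 is reserved for
 * the misc/other-cause interrupt, so queue vectors 1..n map onto
 * PFINT_ITRN/PFINT_LNKLSTN entries 0..n-1.  A VSI with base_vector 1 thus
 * programs its first q_vector through I40E_PFINT_LNKLSTN(0).
 */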
2552/**
2553 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2554 * @hw: ptr to the hardware info
2555 **/
2556static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2557{
2558 u32 val;
2559
2560 /* clear things first */
2561 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2562 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2563
2564 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2565 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2566 I40E_PFINT_ICR0_ENA_GRST_MASK |
2567 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2568 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2569 I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
2570 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2571 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2572 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2573 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2574
2575 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2576
2577 /* SW_ITR_IDX = 0, but don't change INTENA */
2578 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2579 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2580
2581 /* OTHER_ITR_IDX = 0 */
2582 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2583}
2584
2585/**
2586 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2587 * @vsi: the VSI being configured
2588 **/
2589static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2590{
2591 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2592 struct i40e_pf *pf = vsi->back;
2593 struct i40e_hw *hw = &pf->hw;
2594 u32 val;
2595
2596 /* set the ITR configuration */
2597 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2598 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2599 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2600 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2601 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2602 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2603
2604 i40e_enable_misc_int_causes(hw);
2605
2606 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2607 wr32(hw, I40E_PFINT_LNKLST0, 0);
2608
2609 /* Associate the queue pair to the vector and enable the queue int */
2610 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2611 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2612 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2613
2614 wr32(hw, I40E_QINT_RQCTL(0), val);
2615
2616 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2617 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2618 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2619
2620 wr32(hw, I40E_QINT_TQCTL(0), val);
2621 i40e_flush(hw);
2622}
2623
2624/**
2625 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2626 * @pf: board private structure
2627 **/
2628void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2629{
2630 struct i40e_hw *hw = &pf->hw;
2631
2632 wr32(hw, I40E_PFINT_DYN_CTL0,
2633 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2634 i40e_flush(hw);
2635}
2636
2637/**
2638 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2639 * @pf: board private structure
2640 **/
2641 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2642{
2643 struct i40e_hw *hw = &pf->hw;
2644 u32 val;
2645
2646 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2647 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2648 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2649
2650 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2651 i40e_flush(hw);
2652}
2653
2654/**
2655 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2656 * @vsi: pointer to a vsi
2657 * @vector: enable a particular Hw Interrupt vector
2658 **/
2659void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2660{
2661 struct i40e_pf *pf = vsi->back;
2662 struct i40e_hw *hw = &pf->hw;
2663 u32 val;
2664
2665 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2666 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2667 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2668 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2669 /* skip the flush */
2670}
2671
2672/**
2673 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2674 * @irq: interrupt number
2675 * @data: pointer to a q_vector
2676 **/
2677static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2678{
2679 struct i40e_q_vector *q_vector = data;
2680
2681 if (!q_vector->tx.ring && !q_vector->rx.ring)
2682 return IRQ_HANDLED;
2683
2684 napi_schedule(&q_vector->napi);
2685
2686 return IRQ_HANDLED;
2687}
2688
2689/**
2690 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2691 * @vsi: the VSI being configured
2692 * @basename: name for the vector
2693 *
2694 * Allocates MSI-X vectors and requests interrupts from the kernel.
2695 **/
2696static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2697{
2698 int q_vectors = vsi->num_q_vectors;
2699 struct i40e_pf *pf = vsi->back;
2700 int base = vsi->base_vector;
2701 int rx_int_idx = 0;
2702 int tx_int_idx = 0;
2703 int vector, err;
2704
2705 for (vector = 0; vector < q_vectors; vector++) {
2706 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2707
2708 if (q_vector->tx.ring && q_vector->rx.ring) {
2709 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2710 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2711 tx_int_idx++;
2712 } else if (q_vector->rx.ring) {
2713 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2714 "%s-%s-%d", basename, "rx", rx_int_idx++);
2715 } else if (q_vector->tx.ring) {
2716 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2717 "%s-%s-%d", basename, "tx", tx_int_idx++);
2718 } else {
2719 /* skip this unused q_vector */
2720 continue;
2721 }
2722 err = request_irq(pf->msix_entries[base + vector].vector,
2723 vsi->irq_handler,
2724 0,
2725 q_vector->name,
2726 q_vector);
2727 if (err) {
2728 dev_info(&pf->pdev->dev,
2729 "%s: request_irq failed, error: %d\n",
2730 __func__, err);
2731 goto free_queue_irqs;
2732 }
2733 /* assign the mask for this irq */
2734 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2735 &q_vector->affinity_mask);
2736 }
2737
2738 return 0;
2739
2740free_queue_irqs:
2741 while (vector) {
2742 vector--;
2743 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2744 NULL);
2745 free_irq(pf->msix_entries[base + vector].vector,
2746 &(vsi->q_vectors[vector]));
2747 }
2748 return err;
2749}
2750
2751/**
2752 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2753 * @vsi: the VSI being un-configured
2754 **/
2755static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2756{
2757 struct i40e_pf *pf = vsi->back;
2758 struct i40e_hw *hw = &pf->hw;
2759 int base = vsi->base_vector;
2760 int i;
2761
2762 for (i = 0; i < vsi->num_queue_pairs; i++) {
2763 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2764 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2765 }
2766
2767 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2768 for (i = vsi->base_vector;
2769 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2770 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2771
2772 i40e_flush(hw);
2773 for (i = 0; i < vsi->num_q_vectors; i++)
2774 synchronize_irq(pf->msix_entries[i + base].vector);
2775 } else {
2776 /* Legacy and MSI mode - this stops all interrupt handling */
2777 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2778 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2779 i40e_flush(hw);
2780 synchronize_irq(pf->pdev->irq);
2781 }
2782}
2783
2784/**
2785 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2786 * @vsi: the VSI being configured
2787 **/
2788static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2789{
2790 struct i40e_pf *pf = vsi->back;
2791 int i;
2792
2793 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2794 for (i = vsi->base_vector;
2795 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2796 i40e_irq_dynamic_enable(vsi, i);
2797 } else {
2798 i40e_irq_dynamic_enable_icr0(pf);
2799 }
2800
2801 i40e_flush(&pf->hw);
2802 return 0;
2803}
2804
2805/**
2806 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2807 * @pf: board private structure
2808 **/
2809static void i40e_stop_misc_vector(struct i40e_pf *pf)
2810{
2811 /* Disable ICR 0 */
2812 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2813 i40e_flush(&pf->hw);
2814}
2815
2816/**
2817 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2818 * @irq: interrupt number
2819 * @data: pointer to a q_vector
2820 *
2821 * This is the handler used for all MSI/Legacy interrupts, and deals
2822 * with both queue and non-queue interrupts. This is also used in
2823 * MSIX mode to handle the non-queue interrupts.
2824 **/
2825static irqreturn_t i40e_intr(int irq, void *data)
2826{
2827 struct i40e_pf *pf = (struct i40e_pf *)data;
2828 struct i40e_hw *hw = &pf->hw;
2829 irqreturn_t ret = IRQ_NONE;
2830 u32 icr0, icr0_remaining;
2831 u32 val, ena_mask;
2832
2833 icr0 = rd32(hw, I40E_PFINT_ICR0);
2834 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2835
2836 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2837 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2838 goto enable_intr;
2839
2840 /* if interrupt but no bits showing, must be SWINT */
2841 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
2842 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
2843 pf->sw_int_count++;
2844
2845 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2846 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2847
2848 /* temporarily disable queue cause for NAPI processing */
2849 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2850 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2851 wr32(hw, I40E_QINT_RQCTL(0), qval);
2852
2853 qval = rd32(hw, I40E_QINT_TQCTL(0));
2854 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2855 wr32(hw, I40E_QINT_TQCTL(0), qval);
2856
2857 if (!test_bit(__I40E_DOWN, &pf->state))
2858 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2859 }
2860
2861 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2862 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2863 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2864 }
2865
2866 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2867 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2868 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2869 }
2870
2871 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2872 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2873 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2874 }
2875
2876 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2877 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2878 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2879 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2880 val = rd32(hw, I40E_GLGEN_RSTAT);
2881 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2882 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2883 if (val == I40E_RESET_CORER) {
2884 pf->corer_count++;
2885 } else if (val == I40E_RESET_GLOBR) {
2886 pf->globr_count++;
2887 } else if (val == I40E_RESET_EMPR) {
2888 pf->empr_count++;
2889 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
2890 }
2891 }
2892
2893 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2894 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
2895 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2896 }
2897
2898 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
2899 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2900
2901 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
2902 ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2903 i40e_ptp_tx_hwtstamp(pf);
2904 prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
2905 }
2906
2907 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
2908 }
2909
41c445ff
JB
2910 /* If a critical error is pending we have no choice but to reset the
2911 * device.
2912 * Report and mask out any remaining unexpected interrupts.
2913 */
2914 icr0_remaining = icr0 & ena_mask;
2915 if (icr0_remaining) {
2916 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2917 icr0_remaining);
2918 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2919 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2920 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
2921 dev_info(&pf->pdev->dev, "device will be reset\n");
2922 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2923 i40e_service_event_schedule(pf);
2924 }
2925 ena_mask &= ~icr0_remaining;
2926 }
2927 ret = IRQ_HANDLED;
2928
2929enable_intr:
2930 /* re-enable interrupt causes */
2931 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2932 if (!test_bit(__I40E_DOWN, &pf->state)) {
2933 i40e_service_event_schedule(pf);
2934 i40e_irq_dynamic_enable_icr0(pf);
2935 }
2936
2937 return ret;
2938}
2939
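/* [Editor's sketch] The GRST branch above decodes the reset type like so
 * and bumps the matching counter:
 *
 *	val = (rd32(hw, I40E_GLGEN_RSTAT) &
 *	       I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >>
 *	      I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
 *	// I40E_RESET_CORER/_GLOBR/_EMPR -> corer/globr/empr_count
 */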
2940/**
2941 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
2942 * @tx_ring: tx ring to clean
2943 * @budget: how many cleans we're allowed
2944 *
2945 * Returns true if there's any budget left (i.e. the clean is finished)
2946 **/
2947static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
2948{
2949 struct i40e_vsi *vsi = tx_ring->vsi;
2950 u16 i = tx_ring->next_to_clean;
2951 struct i40e_tx_buffer *tx_buf;
2952 struct i40e_tx_desc *tx_desc;
2953
2954 tx_buf = &tx_ring->tx_bi[i];
2955 tx_desc = I40E_TX_DESC(tx_ring, i);
2956 i -= tx_ring->count;
2957
2958 do {
2959 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
2960
2961 /* if next_to_watch is not set then there is no work pending */
2962 if (!eop_desc)
2963 break;
2964
2965 /* prevent any other reads prior to eop_desc */
2966 read_barrier_depends();
2967
2968 /* if the descriptor isn't done, no work yet to do */
2969 if (!(eop_desc->cmd_type_offset_bsz &
2970 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
2971 break;
2972
2973 /* clear next_to_watch to prevent false hangs */
2974 tx_buf->next_to_watch = NULL;
2975
2976 /* unmap skb header data */
2977 dma_unmap_single(tx_ring->dev,
2978 dma_unmap_addr(tx_buf, dma),
2979 dma_unmap_len(tx_buf, len),
2980 DMA_TO_DEVICE);
2981
2982 dma_unmap_len_set(tx_buf, len, 0);
2983
2984
2985 /* move to the next desc and buffer to clean */
2986 tx_buf++;
2987 tx_desc++;
2988 i++;
2989 if (unlikely(!i)) {
2990 i -= tx_ring->count;
2991 tx_buf = tx_ring->tx_bi;
2992 tx_desc = I40E_TX_DESC(tx_ring, 0);
2993 }
2994
2995 /* update budget accounting */
2996 budget--;
2997 } while (likely(budget));
2998
2999 i += tx_ring->count;
3000 tx_ring->next_to_clean = i;
3001
3002 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3003 i40e_irq_dynamic_enable(vsi,
3004 tx_ring->q_vector->v_idx + vsi->base_vector);
3005 }
3006 return budget > 0;
3007}
3008
3009/**
3010 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3011 * @irq: interrupt number
3012 * @data: pointer to a q_vector
3013 **/
3014static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3015{
3016 struct i40e_q_vector *q_vector = data;
3017 struct i40e_vsi *vsi;
3018
3019 if (!q_vector->tx.ring)
3020 return IRQ_HANDLED;
3021
3022 vsi = q_vector->tx.ring->vsi;
3023 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3024
3025 return IRQ_HANDLED;
3026}
3027
3028/**
3029 * map_vector_to_qp - Assigns the queue pair to the vector
3030 * @vsi: the VSI being configured
3031 * @v_idx: vector index
3032 * @qp_idx: queue pair index
3033 **/
3034 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3035 {
3036 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3037 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3038 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3039
3040 tx_ring->q_vector = q_vector;
3041 tx_ring->next = q_vector->tx.ring;
3042 q_vector->tx.ring = tx_ring;
3043 q_vector->tx.count++;
3044
3045 rx_ring->q_vector = q_vector;
3046 rx_ring->next = q_vector->rx.ring;
3047 q_vector->rx.ring = rx_ring;
3048 q_vector->rx.count++;
3049}
3050
3051/**
3052 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3053 * @vsi: the VSI being configured
3054 *
3055 * This function maps descriptor rings to the queue-specific vectors
3056 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3057 * one vector per queue pair, but on a constrained vector budget, we
3058 * group the queue pairs as "efficiently" as possible.
3059 **/
3060static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3061{
3062 int qp_remaining = vsi->num_queue_pairs;
3063 int q_vectors = vsi->num_q_vectors;
3064 int num_ringpairs;
3065 int v_start = 0;
3066 int qp_idx = 0;
3067
3068 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3069 * group them so there are multiple queues per vector.
3070 */
3071 for (; v_start < q_vectors && qp_remaining; v_start++) {
3072 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3073
3074 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3075
3076 q_vector->num_ringpairs = num_ringpairs;
3077
3078 q_vector->rx.count = 0;
3079 q_vector->tx.count = 0;
3080 q_vector->rx.ring = NULL;
3081 q_vector->tx.ring = NULL;
3082
3083 while (num_ringpairs--) {
3084 map_vector_to_qp(vsi, v_start, qp_idx);
3085 qp_idx++;
3086 qp_remaining--;
3087 }
3088 }
3089}
3090
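/* [Editor's sketch] DIV_ROUND_UP spreads the queue pairs as evenly as the
 * vector budget allows, front-loading the remainder.  With 10 queue pairs
 * on 4 vectors the loop above assigns 3, 3, 2, 2:
 *
 *	DIV_ROUND_UP(10, 4) = 3,  DIV_ROUND_UP(7, 3) = 3,
 *	DIV_ROUND_UP(4, 2)  = 2,  DIV_ROUND_UP(2, 1) = 2
 */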
3091/**
3092 * i40e_vsi_request_irq - Request IRQ from the OS
3093 * @vsi: the VSI being configured
3094 * @basename: name for the vector
3095 **/
3096static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3097{
3098 struct i40e_pf *pf = vsi->back;
3099 int err;
3100
3101 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3102 err = i40e_vsi_request_irq_msix(vsi, basename);
3103 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3104 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3105 pf->misc_int_name, pf);
3106 else
3107 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3108 pf->misc_int_name, pf);
3109
3110 if (err)
3111 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3112
3113 return err;
3114}
3115
3116#ifdef CONFIG_NET_POLL_CONTROLLER
3117/**
3118 * i40e_netpoll - A Polling 'interrupt' handler
3119 * @netdev: network interface device structure
3120 *
3121 * This is used by netconsole to send skbs without having to re-enable
3122 * interrupts. It's not called while the normal interrupt routine is executing.
3123 **/
3124static void i40e_netpoll(struct net_device *netdev)
3125{
3126 struct i40e_netdev_priv *np = netdev_priv(netdev);
3127 struct i40e_vsi *vsi = np->vsi;
3128 struct i40e_pf *pf = vsi->back;
3129 int i;
3130
3131 /* if interface is down do nothing */
3132 if (test_bit(__I40E_DOWN, &vsi->state))
3133 return;
3134
3135 pf->flags |= I40E_FLAG_IN_NETPOLL;
3136 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3137 for (i = 0; i < vsi->num_q_vectors; i++)
3138 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3139 } else {
3140 i40e_intr(pf->pdev->irq, netdev);
3141 }
3142 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3143}
3144#endif
3145
3146/**
3147 * i40e_vsi_control_tx - Start or stop a VSI's rings
3148 * @vsi: the VSI being configured
3149 * @enable: start or stop the rings
3150 **/
3151static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3152{
3153 struct i40e_pf *pf = vsi->back;
3154 struct i40e_hw *hw = &pf->hw;
3155 int i, j, pf_q;
3156 u32 tx_reg;
3157
3158 pf_q = vsi->base_queue;
3159 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3160 for (j = 0; j < 50; j++) {
3161 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3162 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3163 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3164 break;
3165 usleep_range(1000, 2000);
3166 }
3167 /* Skip if the queue is already in the requested state */
3168 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3169 continue;
3170 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3171 continue;
3172
3173 /* turn on/off the queue */
3174 if (enable) {
3175 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3176 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3177 } else {
3178 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3179 }
3180
3181 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3182
3183 /* wait for the change to finish */
3184 for (j = 0; j < 10; j++) {
3185 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3186 if (enable) {
3187 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3188 break;
3189 } else {
3190 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3191 break;
3192 }
3193
3194 udelay(10);
3195 }
3196 if (j >= 10) {
3197 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3198 pf_q, (enable ? "en" : "dis"));
3199 return -ETIMEDOUT;
3200 }
3201 }
3202
3203 if (hw->revision_id == 0)
3204 mdelay(50);
3205
3206 return 0;
3207}
3208
3209/**
3210 * i40e_vsi_control_rx - Start or stop a VSI's rings
3211 * @vsi: the VSI being configured
3212 * @enable: start or stop the rings
3213 **/
3214static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3215{
3216 struct i40e_pf *pf = vsi->back;
3217 struct i40e_hw *hw = &pf->hw;
3218 int i, j, pf_q;
3219 u32 rx_reg;
3220
3221 pf_q = vsi->base_queue;
3222 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3223 for (j = 0; j < 50; j++) {
3224 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3225 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3226 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3227 break;
3228 usleep_range(1000, 2000);
3229 }
3230
3231 if (enable) {
3232 /* is STAT set ? */
3233 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3234 continue;
3235 } else {
3236 /* is !STAT set ? */
3237 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3238 continue;
3239 }
3240
3241 /* turn on/off the queue */
3242 if (enable)
3243 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3244 else
3245 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3246 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3247
3248 /* wait for the change to finish */
3249 for (j = 0; j < 10; j++) {
3250 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3251
3252 if (enable) {
3253 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3254 break;
3255 } else {
3256 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3257 break;
3258 }
3259
3260 udelay(10);
3261 }
3262 if (j >= 10) {
3263 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3264 pf_q, (enable ? "en" : "dis"));
3265 return -ETIMEDOUT;
3266 }
3267 }
3268
3269 return 0;
3270}
3271
3272/**
3273 * i40e_vsi_control_rings - Start or stop a VSI's rings
3274 * @vsi: the VSI being configured
3275 * @request: start or stop the rings
3276 **/
3277 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3278 {
3279 int ret = 0;
3280
3281 /* do rx first for enable and last for disable */
3282 if (request) {
3283 ret = i40e_vsi_control_rx(vsi, request);
3284 if (ret)
3285 return ret;
3286 ret = i40e_vsi_control_tx(vsi, request);
3287 } else {
3288 /* Ignore return value, we need to shutdown whatever we can */
3289 i40e_vsi_control_tx(vsi, request);
3290 i40e_vsi_control_rx(vsi, request);
3291 }
3292
3293 return ret;
3294}
3295
3296/**
3297 * i40e_vsi_free_irq - Free the irq association with the OS
3298 * @vsi: the VSI being configured
3299 **/
3300static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3301{
3302 struct i40e_pf *pf = vsi->back;
3303 struct i40e_hw *hw = &pf->hw;
3304 int base = vsi->base_vector;
3305 u32 val, qp;
3306 int i;
3307
3308 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3309 if (!vsi->q_vectors)
3310 return;
3311
3312 for (i = 0; i < vsi->num_q_vectors; i++) {
3313 u16 vector = i + base;
3314
3315 /* free only the irqs that were actually requested */
3316 if (!vsi->q_vectors[i] ||
3317 !vsi->q_vectors[i]->num_ringpairs)
3318 continue;
3319
3320 /* clear the affinity_mask in the IRQ descriptor */
3321 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3322 NULL);
3323 free_irq(pf->msix_entries[vector].vector,
3324 vsi->q_vectors[i]);
3325
3326 /* Tear down the interrupt queue link list
3327 *
3328 * We know that they come in pairs and always
3329 * the Rx first, then the Tx. To clear the
3330 * link list, stick the EOL value into the
3331 * next_q field of the registers.
3332 */
3333 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3334 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3335 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3336 val |= I40E_QUEUE_END_OF_LIST
3337 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3338 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3339
3340 while (qp != I40E_QUEUE_END_OF_LIST) {
3341 u32 next;
3342
3343 val = rd32(hw, I40E_QINT_RQCTL(qp));
3344
3345 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3346 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3347 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3348 I40E_QINT_RQCTL_INTEVENT_MASK);
3349
3350 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3351 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3352
3353 wr32(hw, I40E_QINT_RQCTL(qp), val);
3354
3355 val = rd32(hw, I40E_QINT_TQCTL(qp));
3356
3357 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3358 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3359
3360 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3361 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3362 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3363 I40E_QINT_TQCTL_INTEVENT_MASK);
3364
3365 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3366 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3367
3368 wr32(hw, I40E_QINT_TQCTL(qp), val);
3369 qp = next;
3370 }
3371 }
3372 } else {
3373 free_irq(pf->pdev->irq, pf);
3374
3375 val = rd32(hw, I40E_PFINT_LNKLST0);
3376 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3377 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3378 val |= I40E_QUEUE_END_OF_LIST
3379 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3380 wr32(hw, I40E_PFINT_LNKLST0, val);
3381
3382 val = rd32(hw, I40E_QINT_RQCTL(qp));
3383 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3384 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3385 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3386 I40E_QINT_RQCTL_INTEVENT_MASK);
3387
3388 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3389 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3390
3391 wr32(hw, I40E_QINT_RQCTL(qp), val);
3392
3393 val = rd32(hw, I40E_QINT_TQCTL(qp));
3394
3395 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3396 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3397 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3398 I40E_QINT_TQCTL_INTEVENT_MASK);
3399
3400 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3401 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3402
3403 wr32(hw, I40E_QINT_TQCTL(qp), val);
3404 }
3405}
3406
3407/**
3408 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3409 * @vsi: the VSI being configured
3410 * @v_idx: Index of vector to be freed
3411 *
3412 * This function frees the memory allocated to the q_vector. In addition if
3413 * NAPI is enabled it will delete any references to the NAPI struct prior
3414 * to freeing the q_vector.
3415 **/
3416static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3417{
3418 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3419 struct i40e_ring *ring;
3420
3421 if (!q_vector)
3422 return;
3423
3424 /* disassociate q_vector from rings */
3425 i40e_for_each_ring(ring, q_vector->tx)
3426 ring->q_vector = NULL;
3427
3428 i40e_for_each_ring(ring, q_vector->rx)
3429 ring->q_vector = NULL;
3430
3431 /* only VSI w/ an associated netdev is set up w/ NAPI */
3432 if (vsi->netdev)
3433 netif_napi_del(&q_vector->napi);
3434
3435 vsi->q_vectors[v_idx] = NULL;
3436
3437 kfree_rcu(q_vector, rcu);
3438}
3439
3440/**
3441 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3442 * @vsi: the VSI being un-configured
3443 *
3444 * This frees the memory allocated to the q_vectors and
3445 * deletes references to the NAPI struct.
3446 **/
3447static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3448{
3449 int v_idx;
3450
3451 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3452 i40e_free_q_vector(vsi, v_idx);
3453}
3454
3455/**
3456 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3457 * @pf: board private structure
3458 **/
3459static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3460{
3461 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3462 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3463 pci_disable_msix(pf->pdev);
3464 kfree(pf->msix_entries);
3465 pf->msix_entries = NULL;
3466 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3467 pci_disable_msi(pf->pdev);
3468 }
3469 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3470}
3471
3472/**
3473 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3474 * @pf: board private structure
3475 *
3476 * We go through and clear interrupt specific resources and reset the structure
3477 * to pre-load conditions
3478 **/
3479static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3480{
3481 int i;
3482
3483 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3484 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3485 if (pf->vsi[i])
3486 i40e_vsi_free_q_vectors(pf->vsi[i]);
3487 i40e_reset_interrupt_capability(pf);
3488}
3489
3490/**
3491 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3492 * @vsi: the VSI being configured
3493 **/
3494static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3495{
3496 int q_idx;
3497
3498 if (!vsi->netdev)
3499 return;
3500
3501 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3502 napi_enable(&vsi->q_vectors[q_idx]->napi);
3503}
3504
3505/**
3506 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3507 * @vsi: the VSI being configured
3508 **/
3509static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3510{
3511 int q_idx;
3512
3513 if (!vsi->netdev)
3514 return;
3515
3516 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3517 napi_disable(&vsi->q_vectors[q_idx]->napi);
3518}
3519
3520/**
3521 * i40e_vsi_close - Shut down a VSI
3522 * @vsi: the vsi to be quelled
3523 **/
3524static void i40e_vsi_close(struct i40e_vsi *vsi)
3525{
3526 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3527 i40e_down(vsi);
3528 i40e_vsi_free_irq(vsi);
3529 i40e_vsi_free_tx_resources(vsi);
3530 i40e_vsi_free_rx_resources(vsi);
3531}
3532
3533/**
3534 * i40e_quiesce_vsi - Pause a given VSI
3535 * @vsi: the VSI being paused
3536 **/
3537static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3538{
3539 if (test_bit(__I40E_DOWN, &vsi->state))
3540 return;
3541
3542 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3543 if (vsi->netdev && netif_running(vsi->netdev)) {
3544 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3545 } else {
3546 i40e_vsi_close(vsi);
3547 }
3548}
3549
3550/**
3551 * i40e_unquiesce_vsi - Resume a given VSI
3552 * @vsi: the VSI being resumed
3553 **/
3554static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3555{
3556 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3557 return;
3558
3559 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3560 if (vsi->netdev && netif_running(vsi->netdev))
3561 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3562 else
3563 i40e_vsi_open(vsi); /* this clears the DOWN bit */
3564}
3565
3566/**
3567 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3568 * @pf: the PF
3569 **/
3570static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3571{
3572 int v;
3573
3574 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3575 if (pf->vsi[v])
3576 i40e_quiesce_vsi(pf->vsi[v]);
3577 }
3578}
3579
3580/**
3581 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3582 * @pf: the PF
3583 **/
3584static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3585{
3586 int v;
3587
3588 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3589 if (pf->vsi[v])
3590 i40e_unquiesce_vsi(pf->vsi[v]);
3591 }
3592}
3593
3594/**
3595 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3596 * @dcbcfg: the corresponding DCBx configuration structure
3597 *
3598 * Return the number of TCs from given DCBx configuration
3599 **/
3600static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3601{
3602 u8 num_tc = 0;
3603 int i;
3604
3605 /* Scan the ETS Config Priority Table to find
3606 * traffic class enabled for a given priority
3607 * and use the traffic class index to get the
3608 * number of traffic classes enabled
3609 */
3610 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3611 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3612 num_tc = dcbcfg->etscfg.prioritytable[i];
3613 }
3614
3615 /* Traffic class index starts from zero so
3616 * increment to return the actual count
3617 */
078b5876 3618 return num_tc + 1;
41c445ff
JB
3619}
3620
3621/**
3622 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3623 * @dcbcfg: the corresponding DCBx configuration structure
3624 *
3625 * Query the given DCBX configuration and return a bitmap of
3626 * the traffic classes enabled in it
3627 **/
3628static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3629{
3630 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3631 u8 enabled_tc = 1;
3632 u8 i;
3633
3634 for (i = 0; i < num_tc; i++)
3635 enabled_tc |= 1 << i;
3636
3637 return enabled_tc;
3638}
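
/* Illustrative sketch (hypothetical helper, not driver code): how the
 * two DCB helpers above compose. A priority table mapping the eight
 * user priorities to TCs {0,0,1,1,2,2,0,0} has a highest TC index of 2,
 * so i40e_dcb_get_num_tc() returns 3 and i40e_dcb_get_enabled_tc()
 * returns 0x7 (TC0..TC2 set).
 */
static void i40e_example_dump_tc_info(struct i40e_pf *pf)
{
	struct i40e_dcbx_config *dcbcfg = &pf->hw.local_dcbx_config;
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = i40e_dcb_get_enabled_tc(dcbcfg);

	dev_info(&pf->pdev->dev, "num_tc=%d enabled_tc=0x%02x\n",
		 num_tc, enabled_tc);
}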
3639
3640/**
3641 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
3642 * @pf: PF being queried
3643 *
3644 * Return number of traffic classes enabled for the given PF
3645 **/
3646static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3647{
3648 struct i40e_hw *hw = &pf->hw;
3649 u8 i, enabled_tc;
3650 u8 num_tc = 0;
3651 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3652
3653 /* If DCB is not enabled then always in single TC */
3654 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3655 return 1;
3656
3657 /* In MFP mode, return the count of enabled TCs for this PF */
3658 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3659 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3660 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3661 if (enabled_tc & (1 << i))
3662 num_tc++;
3663 }
3664 return num_tc;
3665 }
3666
3667 /* SFP mode will be enabled for all TCs on port */
3668 return i40e_dcb_get_num_tc(dcbcfg);
3669}
3670
3671/**
3672 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3673 * @pf: PF being queried
3674 *
3675 * Return a bitmap for first enabled traffic class for this PF.
3676 **/
3677static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3678{
3679 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3680 u8 i = 0;
3681
3682 if (!enabled_tc)
3683 return 0x1; /* TC0 */
3684
3685 /* Find the first enabled TC */
3686 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3687 if (enabled_tc & (1 << i))
3688 break;
3689 }
3690
3691 return 1 << i;
3692}
3693
3694/**
3695 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
3696 * @pf: PF being queried
3697 *
3698 * Return a bitmap for enabled traffic classes for this PF.
3699 **/
3700static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3701{
3702 /* If DCB is not enabled for this PF then just return default TC */
3703 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3704 return i40e_pf_get_default_tc(pf);
3705
3706 /* MFP mode will have enabled TCs set by FW */
3707 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3708 return pf->hw.func_caps.enabled_tcmap;
3709
3710 /* SFP mode we want PF to be enabled for all TCs */
3711 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3712}
3713
3714/**
3715 * i40e_vsi_get_bw_info - Query VSI BW Information
3716 * @vsi: the VSI being queried
3717 *
3718 * Returns 0 on success, negative value on failure
3719 **/
3720static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3721{
3722 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3723 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3724 struct i40e_pf *pf = vsi->back;
3725 struct i40e_hw *hw = &pf->hw;
dcae29be 3726 i40e_status aq_ret;
41c445ff 3727 u32 tc_bw_max;
41c445ff
JB
3728 int i;
3729
3730 /* Get the VSI level BW configuration */
dcae29be
JB
3731 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3732 if (aq_ret) {
41c445ff
JB
3733 dev_info(&pf->pdev->dev,
3734 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
dcae29be
JB
3735 aq_ret, pf->hw.aq.asq_last_status);
3736 return -EINVAL;
41c445ff
JB
3737 }
3738
3739 /* Get the VSI level BW configuration per TC */
dcae29be 3740 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
6838b535 3741 NULL);
dcae29be 3742 if (aq_ret) {
41c445ff
JB
3743 dev_info(&pf->pdev->dev,
3744 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
dcae29be
JB
3745 aq_ret, pf->hw.aq.asq_last_status);
3746 return -EINVAL;
41c445ff
JB
3747 }
3748
3749 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3750 dev_info(&pf->pdev->dev,
3751 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3752 bw_config.tc_valid_bits,
3753 bw_ets_config.tc_valid_bits);
3754 /* Still continuing */
3755 }
3756
3757 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3758 vsi->bw_max_quanta = bw_config.max_bw;
3759 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3760 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3761 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3762 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3763 vsi->bw_ets_limit_credits[i] =
3764 le16_to_cpu(bw_ets_config.credits[i]);
3765 /* 3 bits out of 4 for each TC */
3766 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3767 }
078b5876 3768
dcae29be 3769 return 0;
41c445ff
JB
3770}
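
/* Reference sketch (hypothetical helper): the tc_bw_max word built
 * above packs one 4-bit field per TC, of which the low 3 bits carry
 * the max quanta, so extraction is a shift-and-mask per TC.
 */
static inline u8 i40e_example_tc_max_quanta(u32 tc_bw_max, int tc)
{
	return (u8)((tc_bw_max >> (tc * 4)) & 0x7);
}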
3771
3772/**
3773 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3774 * @vsi: the VSI being configured
3775 * @enabled_tc: TC bitmap
3776 * @bw_credits: BW shared credits per TC
3777 *
3778 * Returns 0 on success, negative value on failure
3779 **/
dcae29be 3780static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
41c445ff
JB
3781 u8 *bw_share)
3782{
3783 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
dcae29be
JB
3784 i40e_status aq_ret;
3785 int i;
41c445ff
JB
3786
3787 bw_data.tc_valid_bits = enabled_tc;
3788 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3789 bw_data.tc_bw_credits[i] = bw_share[i];
3790
dcae29be
JB
3791 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3792 NULL);
3793 if (aq_ret) {
41c445ff 3794 dev_info(&vsi->back->pdev->dev,
69bfb110
JB
3795 "AQ command Config VSI BW allocation per TC failed = %d\n",
3796 vsi->back->hw.aq.asq_last_status);
dcae29be 3797 return -EINVAL;
41c445ff
JB
3798 }
3799
3800 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3801 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3802
dcae29be 3803 return 0;
41c445ff
JB
3804}
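
/* Usage sketch (hypothetical helper, not driver code): give every
 * enabled TC an equal relative share, mirroring what
 * i40e_vsi_config_tc() below does before reprogramming the queue map.
 */
static int i40e_example_equal_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	int i;

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;	/* equal relative credits */

	return i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
}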
3805
3806/**
3807 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3808 * @vsi: the VSI being configured
3809 * @enabled_tc: TC map to be enabled
3810 *
3811 **/
3812static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3813{
3814 struct net_device *netdev = vsi->netdev;
3815 struct i40e_pf *pf = vsi->back;
3816 struct i40e_hw *hw = &pf->hw;
3817 u8 netdev_tc = 0;
3818 int i;
3819 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3820
3821 if (!netdev)
3822 return;
3823
3824 if (!enabled_tc) {
3825 netdev_reset_tc(netdev);
3826 return;
3827 }
3828
3829 /* Set up actual enabled TCs on the VSI */
3830 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3831 return;
3832
3833 /* set per TC queues for the VSI */
3834 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3835 /* Only set TC queues for enabled tcs
3836 *
3837 * e.g. for a VSI that has TC0 and TC3 enabled, the
3838 * enabled_tc bitmap would be 0x9 (binary 1001); the
3839 * driver sets numtc for the netdev to 2, and the netdev
3840 * layer references them as TC 0 and TC 1.
3841 */
3842 if (vsi->tc_config.enabled_tc & (1 << i))
3843 netdev_set_tc_queue(netdev,
3844 vsi->tc_config.tc_info[i].netdev_tc,
3845 vsi->tc_config.tc_info[i].qcount,
3846 vsi->tc_config.tc_info[i].qoffset);
3847 }
3848
3849 /* Assign UP2TC map for the VSI */
3850 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3851 /* Get the actual TC# for the UP */
3852 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3853 /* Get the mapped netdev TC# for the UP */
3854 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3855 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3856 }
3857}
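
/* Worked example (hypothetical queue layout, not driver code): with
 * enabled_tc = 0x9 (TC0 and TC3) and numtc = 2, the calls made above
 * reduce to the following, so the stack only ever sees netdev TCs 0
 * and 1.
 */
static void i40e_example_netdev_tc(struct net_device *netdev)
{
	netdev_set_num_tc(netdev, 2);
	netdev_set_tc_queue(netdev, 0, 4, 0);	/* TC0 -> queues 0..3 */
	netdev_set_tc_queue(netdev, 1, 4, 4);	/* TC3 -> queues 4..7 */
}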
3858
3859/**
3860 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
3861 * @vsi: the VSI being configured
3862 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3863 **/
3864static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3865 struct i40e_vsi_context *ctxt)
3866{
3867 /* copy just the sections touched not the entire info
3868 * since not all sections are valid as returned by
3869 * update vsi params
3870 */
3871 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3872 memcpy(&vsi->info.queue_mapping,
3873 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3874 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3875 sizeof(vsi->info.tc_mapping));
3876}
3877
3878/**
3879 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3880 * @vsi: VSI to be configured
3881 * @enabled_tc: TC bitmap
3882 *
3883 * This configures a particular VSI for TCs that are mapped to the
3884 * given TC bitmap. It uses default bandwidth share for TCs across
3885 * VSIs to configure TC for a particular VSI.
3886 *
3887 * NOTE:
3888 * It is expected that the VSI queues have been quiesced before calling
3889 * this function.
3890 **/
3891static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3892{
3893 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3894 struct i40e_vsi_context ctxt;
3895 int ret = 0;
3896 int i;
3897
3898 /* Check if enabled_tc is same as existing or new TCs */
3899 if (vsi->tc_config.enabled_tc == enabled_tc)
3900 return ret;
3901
3902 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3903 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3904 if (enabled_tc & (1 << i))
3905 bw_share[i] = 1;
3906 }
3907
3908 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3909 if (ret) {
3910 dev_info(&vsi->back->pdev->dev,
3911 "Failed configuring TC map %d for VSI %d\n",
3912 enabled_tc, vsi->seid);
3913 goto out;
3914 }
3915
3916 /* Update Queue Pairs Mapping for currently enabled UPs */
3917 ctxt.seid = vsi->seid;
3918 ctxt.pf_num = vsi->back->hw.pf_id;
3919 ctxt.vf_num = 0;
3920 ctxt.uplink_seid = vsi->uplink_seid;
3921 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3922 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3923
3924 /* Update the VSI after updating the VSI queue-mapping information */
3925 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3926 if (ret) {
3927 dev_info(&vsi->back->pdev->dev,
3928 "update vsi failed, aq_err=%d\n",
3929 vsi->back->hw.aq.asq_last_status);
3930 goto out;
3931 }
3932 /* update the local VSI info with updated queue map */
3933 i40e_vsi_update_queue_map(vsi, &ctxt);
3934 vsi->info.valid_sections = 0;
3935
3936 /* Update current VSI BW information */
3937 ret = i40e_vsi_get_bw_info(vsi);
3938 if (ret) {
3939 dev_info(&vsi->back->pdev->dev,
3940 "Failed updating vsi bw info, aq_err=%d\n",
3941 vsi->back->hw.aq.asq_last_status);
3942 goto out;
3943 }
3944
3945 /* Update the netdev TC setup */
3946 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3947out:
3948 return ret;
3949}
3950
4e3b35b0
NP
3951/**
3952 * i40e_veb_config_tc - Configure TCs for given VEB
3953 * @veb: given VEB
3954 * @enabled_tc: TC bitmap
3955 *
3956 * Configures given TC bitmap for VEB (switching) element
3957 **/
3958int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
3959{
3960 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
3961 struct i40e_pf *pf = veb->pf;
3962 int ret = 0;
3963 int i;
3964
3965 /* No TCs or already enabled TCs just return */
3966 if (!enabled_tc || veb->enabled_tc == enabled_tc)
3967 return ret;
3968
3969 bw_data.tc_valid_bits = enabled_tc;
3970 /* bw_data.absolute_credits is not set (relative) */
3971
3972 /* Enable ETS TCs with equal BW Share for now */
3973 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3974 if (enabled_tc & (1 << i))
3975 bw_data.tc_bw_share_credits[i] = 1;
3976 }
3977
3978 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
3979 &bw_data, NULL);
3980 if (ret) {
3981 dev_info(&pf->pdev->dev,
3982 "veb bw config failed, aq_err=%d\n",
3983 pf->hw.aq.asq_last_status);
3984 goto out;
3985 }
3986
3987 /* Update the BW information */
3988 ret = i40e_veb_get_bw_info(veb);
3989 if (ret) {
3990 dev_info(&pf->pdev->dev,
3991 "Failed getting veb bw config, aq_err=%d\n",
3992 pf->hw.aq.asq_last_status);
3993 }
3994
3995out:
3996 return ret;
3997}
3998
3999#ifdef CONFIG_I40E_DCB
4000/**
4001 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4002 * @pf: PF struct
4003 *
4004 * Reconfigure VEB/VSIs on a given PF. It is assumed that
4005 * the caller has already quiesced all the VSIs before calling
4006 * this function.
4007 **/
4008static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4009{
4010 u8 tc_map = 0;
4011 int ret;
4012 u8 v;
4013
4014 /* Enable the TCs available on PF to all VEBs */
4015 tc_map = i40e_pf_get_tc_map(pf);
4016 for (v = 0; v < I40E_MAX_VEB; v++) {
4017 if (!pf->veb[v])
4018 continue;
4019 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4020 if (ret) {
4021 dev_info(&pf->pdev->dev,
4022 "Failed configuring TC for VEB seid=%d\n",
4023 pf->veb[v]->seid);
4024 /* Will try to configure as many components as possible */
4025 }
4026 }
4027
4028 /* Update each VSI */
4029 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4030 if (!pf->vsi[v])
4031 continue;
4032
4033 /* - Enable all TCs for the LAN VSI
4034 * - For all others keep them at TC0 for now
4035 */
4036 if (v == pf->lan_vsi)
4037 tc_map = i40e_pf_get_tc_map(pf);
4038 else
4039 tc_map = i40e_pf_get_default_tc(pf);
4040
4041 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4042 if (ret) {
4043 dev_info(&pf->pdev->dev,
4044 "Failed configuring TC for VSI seid=%d\n",
4045 pf->vsi[v]->seid);
4046 /* Will try to configure as many components as possible */
4047 } else {
4048 if (pf->vsi[v]->netdev)
4049 i40e_dcbnl_set_all(pf->vsi[v]);
4050 }
4051 }
4052}
4053
4054/**
4055 * i40e_init_pf_dcb - Initialize DCB configuration
4056 * @pf: PF being configured
4057 *
4058 * Query the current DCB configuration and cache it
4059 * in the hardware structure
4060 **/
4061static int i40e_init_pf_dcb(struct i40e_pf *pf)
4062{
4063 struct i40e_hw *hw = &pf->hw;
4064 int err = 0;
4065
4066 if (pf->hw.func_caps.npar_enable)
4067 goto out;
4068
4069 /* Get the initial DCB configuration */
4070 err = i40e_init_dcb(hw);
4071 if (!err) {
4072 /* Device/Function is not DCBX capable */
4073 if ((!hw->func_caps.dcb) ||
4074 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4075 dev_info(&pf->pdev->dev,
4076 "DCBX offload is not supported or is disabled for this PF.\n");
4077
4078 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4079 goto out;
4080
4081 } else {
4082 /* When status is not DISABLED then DCBX in FW */
4083 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4084 DCB_CAP_DCBX_VER_IEEE;
4085 pf->flags |= I40E_FLAG_DCB_ENABLED;
4086 }
4087 }
4088
4089out:
4090 return err;
4091}
4092#endif /* CONFIG_I40E_DCB */
4093
41c445ff
JB
4094/**
4095 * i40e_up_complete - Finish the last steps of bringing up a connection
4096 * @vsi: the VSI being configured
4097 **/
4098static int i40e_up_complete(struct i40e_vsi *vsi)
4099{
4100 struct i40e_pf *pf = vsi->back;
4101 int err;
4102
4103 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4104 i40e_vsi_configure_msix(vsi);
4105 else
4106 i40e_configure_msi_and_legacy(vsi);
4107
4108 /* start rings */
4109 err = i40e_vsi_control_rings(vsi, true);
4110 if (err)
4111 return err;
4112
4113 clear_bit(__I40E_DOWN, &vsi->state);
4114 i40e_napi_enable_all(vsi);
4115 i40e_vsi_enable_irq(vsi);
4116
4117 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4118 (vsi->netdev)) {
6d779b41 4119 netdev_info(vsi->netdev, "NIC Link is Up\n");
41c445ff
JB
4120 netif_tx_start_all_queues(vsi->netdev);
4121 netif_carrier_on(vsi->netdev);
6d779b41
AS
4122 } else if (vsi->netdev) {
4123 netdev_info(vsi->netdev, "NIC Link is Down\n");
41c445ff 4124 }
ca64fa4e
ASJ
4125
4126 /* replay FDIR SB filters */
4127 if (vsi->type == I40E_VSI_FDIR)
4128 i40e_fdir_filter_restore(vsi);
41c445ff
JB
4129 i40e_service_event_schedule(pf);
4130
4131 return 0;
4132}
4133
4134/**
4135 * i40e_vsi_reinit_locked - Reset the VSI
4136 * @vsi: the VSI being configured
4137 *
4138 * Rebuild the ring structs after some configuration
4139 * has changed, e.g. MTU size.
4140 **/
4141static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4142{
4143 struct i40e_pf *pf = vsi->back;
4144
4145 WARN_ON(in_interrupt());
4146 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4147 usleep_range(1000, 2000);
4148 i40e_down(vsi);
4149
4150 /* Give a VF some time to respond to the reset. The
4151 * two second wait is based upon the watchdog cycle in
4152 * the VF driver.
4153 */
4154 if (vsi->type == I40E_VSI_SRIOV)
4155 msleep(2000);
4156 i40e_up(vsi);
4157 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4158}
4159
4160/**
4161 * i40e_up - Bring the connection back up after being down
4162 * @vsi: the VSI being configured
4163 **/
4164int i40e_up(struct i40e_vsi *vsi)
4165{
4166 int err;
4167
4168 err = i40e_vsi_configure(vsi);
4169 if (!err)
4170 err = i40e_up_complete(vsi);
4171
4172 return err;
4173}
4174
4175/**
4176 * i40e_down - Shutdown the connection processing
4177 * @vsi: the VSI being stopped
4178 **/
4179void i40e_down(struct i40e_vsi *vsi)
4180{
4181 int i;
4182
4183 /* It is assumed that the caller of this function
4184 * sets the vsi->state __I40E_DOWN bit.
4185 */
4186 if (vsi->netdev) {
4187 netif_carrier_off(vsi->netdev);
4188 netif_tx_disable(vsi->netdev);
4189 }
4190 i40e_vsi_disable_irq(vsi);
4191 i40e_vsi_control_rings(vsi, false);
4192 i40e_napi_disable_all(vsi);
4193
4194 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
4195 i40e_clean_tx_ring(vsi->tx_rings[i]);
4196 i40e_clean_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
4197 }
4198}
4199
4200/**
4201 * i40e_setup_tc - configure multiple traffic classes
4202 * @netdev: net device to configure
4203 * @tc: number of traffic classes to enable
4204 **/
4205static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4206{
4207 struct i40e_netdev_priv *np = netdev_priv(netdev);
4208 struct i40e_vsi *vsi = np->vsi;
4209 struct i40e_pf *pf = vsi->back;
4210 u8 enabled_tc = 0;
4211 int ret = -EINVAL;
4212 int i;
4213
4214 /* Check if DCB enabled to continue */
4215 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4216 netdev_info(netdev, "DCB is not enabled for adapter\n");
4217 goto exit;
4218 }
4219
4220 /* Check if MFP enabled */
4221 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4222 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4223 goto exit;
4224 }
4225
4226 /* Check whether tc count is within enabled limit */
4227 if (tc > i40e_pf_get_num_tc(pf)) {
4228 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4229 goto exit;
4230 }
4231
4232 /* Generate TC map for number of tc requested */
4233 for (i = 0; i < tc; i++)
4234 enabled_tc |= (1 << i);
4235
4236 /* Requesting same TC configuration as already enabled */
4237 if (enabled_tc == vsi->tc_config.enabled_tc)
4238 return 0;
4239
4240 /* Quiesce VSI queues */
4241 i40e_quiesce_vsi(vsi);
4242
4243 /* Configure VSI for enabled TCs */
4244 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4245 if (ret) {
4246 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4247 vsi->seid);
4248 goto exit;
4249 }
4250
4251 /* Unquiesce VSI */
4252 i40e_unquiesce_vsi(vsi);
4253
4254exit:
4255 return ret;
4256}
4257
4258/**
4259 * i40e_open - Called when a network interface is made active
4260 * @netdev: network interface device structure
4261 *
4262 * The open entry point is called when a network interface is made
4263 * active by the system (IFF_UP). At this point all resources needed
4264 * for transmit and receive operations are allocated, the interrupt
4265 * handler is registered with the OS, the netdev watchdog subtask is
4266 * enabled, and the stack is notified that the interface is ready.
4267 *
4268 * Returns 0 on success, negative value on failure
4269 **/
4270static int i40e_open(struct net_device *netdev)
4271{
4272 struct i40e_netdev_priv *np = netdev_priv(netdev);
4273 struct i40e_vsi *vsi = np->vsi;
4274 struct i40e_pf *pf = vsi->back;
41c445ff
JB
4275 int err;
4276
4eb3f768
SN
4277 /* disallow open during test or if eeprom is broken */
4278 if (test_bit(__I40E_TESTING, &pf->state) ||
4279 test_bit(__I40E_BAD_EEPROM, &pf->state))
41c445ff
JB
4280 return -EBUSY;
4281
4282 netif_carrier_off(netdev);
4283
6c167f58
EK
4284 err = i40e_vsi_open(vsi);
4285 if (err)
4286 return err;
4287
4288#ifdef CONFIG_I40E_VXLAN
4289 vxlan_get_rx_port(netdev);
4290#endif
4291
4292 return 0;
4293}
4294
4295/**
4296 * i40e_vsi_open - Bring up a VSI
4297 * @vsi: the VSI to open
4298 *
4299 * Finish initialization of the VSI.
4300 *
4301 * Returns 0 on success, negative value on failure
4302 **/
4303int i40e_vsi_open(struct i40e_vsi *vsi)
4304{
4305 struct i40e_pf *pf = vsi->back;
4306 char int_name[IFNAMSIZ];
4307 int err;
4308
41c445ff
JB
4309 /* allocate descriptors */
4310 err = i40e_vsi_setup_tx_resources(vsi);
4311 if (err)
4312 goto err_setup_tx;
4313 err = i40e_vsi_setup_rx_resources(vsi);
4314 if (err)
4315 goto err_setup_rx;
4316
4317 err = i40e_vsi_configure(vsi);
4318 if (err)
4319 goto err_setup_rx;
4320
c22e3c6c
SN
4321 if (vsi->netdev) {
4322 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4323 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4324 err = i40e_vsi_request_irq(vsi, int_name);
4325 if (err)
4326 goto err_setup_rx;
4327
4328 /* Notify the stack of the actual queue counts. */
4329 err = netif_set_real_num_tx_queues(vsi->netdev,
4330 vsi->num_queue_pairs);
4331 if (err)
4332 goto err_set_queues;
4333
4334 err = netif_set_real_num_rx_queues(vsi->netdev,
4335 vsi->num_queue_pairs);
4336 if (err)
4337 goto err_set_queues;
8a9eb7d3
SN
4338
4339 } else if (vsi->type == I40E_VSI_FDIR) {
4340 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4341 dev_driver_string(&pf->pdev->dev));
4342 err = i40e_vsi_request_irq(vsi, int_name);
c22e3c6c 4343 } else {
6c167f58
EK
4344 err = -EINVAL;
4345 goto err_setup_rx;
4346 }
25946ddb 4347
41c445ff
JB
4348 err = i40e_up_complete(vsi);
4349 if (err)
4350 goto err_up_complete;
4351
41c445ff
JB
4352 return 0;
4353
4354err_up_complete:
4355 i40e_down(vsi);
25946ddb 4356err_set_queues:
41c445ff
JB
4357 i40e_vsi_free_irq(vsi);
4358err_setup_rx:
4359 i40e_vsi_free_rx_resources(vsi);
4360err_setup_tx:
4361 i40e_vsi_free_tx_resources(vsi);
4362 if (vsi == pf->vsi[pf->lan_vsi])
4363 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4364
4365 return err;
4366}
4367
17a73f6b
JG
4368/**
4369 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4370 * @pf: Pointer to pf
4371 *
4372 * This function destroys the hlist where all the Flow Director
4373 * filters were saved.
4374 **/
4375static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4376{
4377 struct i40e_fdir_filter *filter;
4378 struct hlist_node *node2;
4379
4380 hlist_for_each_entry_safe(filter, node2,
4381 &pf->fdir_filter_list, fdir_node) {
4382 hlist_del(&filter->fdir_node);
4383 kfree(filter);
4384 }
4385 pf->fdir_pf_active_filters = 0;
4386}
4387
41c445ff
JB
4388/**
4389 * i40e_close - Disables a network interface
4390 * @netdev: network interface device structure
4391 *
4392 * The close entry point is called when an interface is de-activated
4393 * by the OS. The hardware is still under the driver's control, but
4394 * this netdev interface is disabled.
4395 *
4396 * Returns 0, this is not allowed to fail
4397 **/
4398static int i40e_close(struct net_device *netdev)
4399{
4400 struct i40e_netdev_priv *np = netdev_priv(netdev);
4401 struct i40e_vsi *vsi = np->vsi;
4402
90ef8d47 4403 i40e_vsi_close(vsi);
41c445ff
JB
4404
4405 return 0;
4406}
4407
4408/**
4409 * i40e_do_reset - Start a PF or Core Reset sequence
4410 * @pf: board private structure
4411 * @reset_flags: which reset is requested
4412 *
4413 * The essential difference in resets is that the PF Reset
4414 * doesn't clear the packet buffers, doesn't reset the PE
4415 * firmware, and doesn't bother the other PFs on the chip.
4416 **/
4417void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4418{
4419 u32 val;
4420
4421 WARN_ON(in_interrupt());
4422
4423 /* do the biggest reset indicated */
4424 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4425
4426 /* Request a Global Reset
4427 *
4428 * This will start the chip's countdown to the actual full
4429 * chip reset event, and a warning interrupt to be sent
4430 * to all PFs, including the requestor. Our handler
4431 * for the warning interrupt will deal with the shutdown
4432 * and recovery of the switch setup.
4433 */
69bfb110 4434 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
41c445ff
JB
4435 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4436 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4437 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4438
4439 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4440
4441 /* Request a Core Reset
4442 *
4443 * Same as Global Reset, except does *not* include the MAC/PHY
4444 */
69bfb110 4445 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
41c445ff
JB
4446 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4447 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4448 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4449 i40e_flush(&pf->hw);
4450
7823fe34
SN
4451 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
4452
4453 /* Request a Firmware Reset
4454 *
4455 * Same as Global reset, plus restarting the
4456 * embedded firmware engine.
4457 */
4458 /* enable EMP Reset */
4459 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
4460 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
4461 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
4462
4463 /* force the reset */
4464 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4465 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
4466 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4467 i40e_flush(&pf->hw);
4468
41c445ff
JB
4469 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4470
4471 /* Request a PF Reset
4472 *
4473 * Resets only the PF-specific registers
4474 *
4475 * This goes directly to the tear-down and rebuild of
4476 * the switch, since we need to do all the recovery as
4477 * for the Core Reset.
4478 */
69bfb110 4479 dev_dbg(&pf->pdev->dev, "PFR requested\n");
41c445ff
JB
4480 i40e_handle_reset_warning(pf);
4481
4482 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4483 int v;
4484
4485 /* Find the VSI(s) that requested a re-init */
4486 dev_info(&pf->pdev->dev,
4487 "VSI reinit requested\n");
4488 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4489 struct i40e_vsi *vsi = pf->vsi[v];
4490 if (vsi != NULL &&
4491 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4492 i40e_vsi_reinit_locked(pf->vsi[v]);
4493 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4494 }
4495 }
4496
4497 /* no further action needed, so return now */
4498 return;
4499 } else {
4500 dev_info(&pf->pdev->dev,
4501 "bad reset request 0x%08x\n", reset_flags);
4502 return;
4503 }
4504}
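
/* Usage sketch (hypothetical helper): callers request a specific reset
 * by passing the corresponding flag bit; userland-triggered paths
 * should go through i40e_do_reset_safe() below for rtnl protection.
 */
static inline void i40e_example_request_pf_reset(struct i40e_pf *pf)
{
	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
}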
4505
4e3b35b0
NP
4506#ifdef CONFIG_I40E_DCB
4507/**
4508 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
4509 * @pf: board private structure
4510 * @old_cfg: current DCB config
4511 * @new_cfg: new DCB config
4512 **/
4513bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4514 struct i40e_dcbx_config *old_cfg,
4515 struct i40e_dcbx_config *new_cfg)
4516{
4517 bool need_reconfig = false;
4518
4519 /* Check if ETS configuration has changed */
4520 if (memcmp(&new_cfg->etscfg,
4521 &old_cfg->etscfg,
4522 sizeof(new_cfg->etscfg))) {
4523 /* If Priority Table has changed reconfig is needed */
4524 if (memcmp(&new_cfg->etscfg.prioritytable,
4525 &old_cfg->etscfg.prioritytable,
4526 sizeof(new_cfg->etscfg.prioritytable))) {
4527 need_reconfig = true;
69bfb110 4528 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4e3b35b0
NP
4529 }
4530
4531 if (memcmp(&new_cfg->etscfg.tcbwtable,
4532 &old_cfg->etscfg.tcbwtable,
4533 sizeof(new_cfg->etscfg.tcbwtable)))
69bfb110 4534 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4e3b35b0
NP
4535
4536 if (memcmp(&new_cfg->etscfg.tsatable,
4537 &old_cfg->etscfg.tsatable,
4538 sizeof(new_cfg->etscfg.tsatable)))
69bfb110 4539 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4e3b35b0
NP
4540 }
4541
4542 /* Check if PFC configuration has changed */
4543 if (memcmp(&new_cfg->pfc,
4544 &old_cfg->pfc,
4545 sizeof(new_cfg->pfc))) {
4546 need_reconfig = true;
69bfb110 4547 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4e3b35b0
NP
4548 }
4549
4550 /* Check if APP Table has changed */
4551 if (memcmp(&new_cfg->app,
4552 &old_cfg->app,
3d9667a9 4553 sizeof(new_cfg->app))) {
4e3b35b0 4554 need_reconfig = true;
69bfb110 4555 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
3d9667a9 4556 }
4e3b35b0
NP
4557
4558 return need_reconfig;
4559}
4560
4561/**
4562 * i40e_handle_lldp_event - Handle LLDP Change MIB event
4563 * @pf: board private structure
4564 * @e: event info posted on ARQ
4565 **/
4566static int i40e_handle_lldp_event(struct i40e_pf *pf,
4567 struct i40e_arq_event_info *e)
4568{
4569 struct i40e_aqc_lldp_get_mib *mib =
4570 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
4571 struct i40e_hw *hw = &pf->hw;
4572 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
4573 struct i40e_dcbx_config tmp_dcbx_cfg;
4574 bool need_reconfig = false;
4575 int ret = 0;
4576 u8 type;
4577
4578 /* Ignore if event is not for Nearest Bridge */
4579 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
4580 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
4581 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
4582 return ret;
4583
4584 /* Check MIB Type and return if event for Remote MIB update */
4585 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
4586 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
4587 /* Update the remote cached instance and return */
4588 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
4589 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
4590 &hw->remote_dcbx_config);
4591 goto exit;
4592 }
4593
4594 /* Convert/store the DCBX data from LLDPDU temporarily */
4595 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
4596 ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
4597 if (ret) {
4598 /* Error in LLDPDU parsing return */
4599 dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
4600 goto exit;
4601 }
4602
4603 /* No change detected in DCBX configs */
4604 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
69bfb110 4605 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4e3b35b0
NP
4606 goto exit;
4607 }
4608
4609 need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
4610
4611 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
4612
4613 /* Overwrite the new configuration */
4614 *dcbx_cfg = tmp_dcbx_cfg;
4615
4616 if (!need_reconfig)
4617 goto exit;
4618
4619 /* Reconfiguration needed, so quiesce all VSIs */
4620 i40e_pf_quiesce_all_vsi(pf);
4621
4622 /* Changes in configuration update VEB/VSI */
4623 i40e_dcb_reconfigure(pf);
4624
4625 i40e_pf_unquiesce_all_vsi(pf);
4626exit:
4627 return ret;
4628}
4629#endif /* CONFIG_I40E_DCB */
4630
23326186
ASJ
4631/**
4632 * i40e_do_reset_safe - Protected reset path for userland calls.
4633 * @pf: board private structure
4634 * @reset_flags: which reset is requested
4635 *
4636 **/
4637void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
4638{
4639 rtnl_lock();
4640 i40e_do_reset(pf, reset_flags);
4641 rtnl_unlock();
4642}
4643
41c445ff
JB
4644/**
4645 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4646 * @pf: board private structure
4647 * @e: event info posted on ARQ
4648 *
4649 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4650 * and VF queues
4651 **/
4652static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4653 struct i40e_arq_event_info *e)
4654{
4655 struct i40e_aqc_lan_overflow *data =
4656 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4657 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4658 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4659 struct i40e_hw *hw = &pf->hw;
4660 struct i40e_vf *vf;
4661 u16 vf_id;
4662
69bfb110
JB
4663 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
4664 queue, qtx_ctl);
41c445ff
JB
4665
4666 /* Queue belongs to VF, find the VF and issue VF reset */
4667 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4668 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4669 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4670 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4671 vf_id -= hw->func_caps.vf_base_id;
4672 vf = &pf->vf[vf_id];
4673 i40e_vc_notify_vf_reset(vf);
4674 /* Allow VF to process pending reset notification */
4675 msleep(20);
4676 i40e_reset_vf(vf, false);
4677 }
4678}
4679
4680/**
4681 * i40e_service_event_complete - Finish up the service event
4682 * @pf: board private structure
4683 **/
4684static void i40e_service_event_complete(struct i40e_pf *pf)
4685{
4686 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4687
4688 /* flush memory to make sure state is correct before next watchdog */
4689 smp_mb__before_clear_bit();
4690 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4691}
4692
55a5e60b
ASJ
4693/**
4694 * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
4695 * @pf: board private structure
4696 **/
4697int i40e_get_current_fd_count(struct i40e_pf *pf)
4698{
4699 int val, fcnt_prog;
4700 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
4701 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
4702 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
4703 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
4704 return fcnt_prog;
4705}
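
/* Reference sketch (hypothetical helper): I40E_PFQF_FDSTAT packs the
 * guaranteed-filter count in its low field and the best-effort count
 * behind I40E_PFQF_FDSTAT_BEST_CNT_SHIFT; the function above sums the
 * two, but they can also be read out separately.
 */
static void i40e_example_fd_counts(struct i40e_pf *pf,
				   u32 *guaranteed, u32 *best_effort)
{
	u32 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);

	*guaranteed = val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK;
	*best_effort = (val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
}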
4706
4707/**
4708 * i40e_fdir_check_and_reenable - Re-enable FD ATR or SB if disabled
4709 * @pf: board private structure
4710 **/
4711void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4712{
4713 u32 fcnt_prog, fcnt_avail;
4714
4715 /* Check if FD SB or ATR was auto-disabled and if there is enough room
4716 * to re-enable
4717 */
4718 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4719 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4720 return;
4721 fcnt_prog = i40e_get_current_fd_count(pf);
4722 fcnt_avail = pf->hw.fdir_shared_filter_count +
4723 pf->fdir_pf_filter_count;
4724 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4725 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4726 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
4727 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
4728 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
4729 }
4730 }
4731 /* Wait for some more space to be available to turn on ATR */
4732 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
4733 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4734 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
4735 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4736 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
4737 }
4738 }
4739}
4740
41c445ff
JB
4741/**
4742 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4743 * @pf: board private structure
4744 **/
4745static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4746{
4747 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4748 return;
4749
41c445ff
JB
4750 /* if interface is down do nothing */
4751 if (test_bit(__I40E_DOWN, &pf->state))
4752 return;
55a5e60b
ASJ
4753 i40e_fdir_check_and_reenable(pf);
4754
4755 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4756 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4757 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
41c445ff
JB
4758}
4759
4760/**
4761 * i40e_vsi_link_event - notify VSI of a link event
4762 * @vsi: vsi to be notified
4763 * @link_up: link up or down
4764 **/
4765static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4766{
4767 if (!vsi)
4768 return;
4769
4770 switch (vsi->type) {
4771 case I40E_VSI_MAIN:
4772 if (!vsi->netdev || !vsi->netdev_registered)
4773 break;
4774
4775 if (link_up) {
4776 netif_carrier_on(vsi->netdev);
4777 netif_tx_wake_all_queues(vsi->netdev);
4778 } else {
4779 netif_carrier_off(vsi->netdev);
4780 netif_tx_stop_all_queues(vsi->netdev);
4781 }
4782 break;
4783
4784 case I40E_VSI_SRIOV:
4785 break;
4786
4787 case I40E_VSI_VMDQ2:
4788 case I40E_VSI_CTRL:
4789 case I40E_VSI_MIRROR:
4790 default:
4791 /* there is no notification for other VSIs */
4792 break;
4793 }
4794}
4795
4796/**
4797 * i40e_veb_link_event - notify elements on the veb of a link event
4798 * @veb: veb to be notified
4799 * @link_up: link up or down
4800 **/
4801static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4802{
4803 struct i40e_pf *pf;
4804 int i;
4805
4806 if (!veb || !veb->pf)
4807 return;
4808 pf = veb->pf;
4809
4810 /* depth first... */
4811 for (i = 0; i < I40E_MAX_VEB; i++)
4812 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4813 i40e_veb_link_event(pf->veb[i], link_up);
4814
4815 /* ... now the local VSIs */
4816 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4817 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4818 i40e_vsi_link_event(pf->vsi[i], link_up);
4819}
4820
4821/**
4822 * i40e_link_event - Update netif_carrier status
4823 * @pf: board private structure
4824 **/
4825static void i40e_link_event(struct i40e_pf *pf)
4826{
4827 bool new_link, old_link;
4828
4829 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4830 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4831
4832 if (new_link == old_link)
4833 return;
4834
6d779b41
AS
4835 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4836 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4837 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
41c445ff
JB
4838
4839 /* Notify the base of the switch tree connected to
4840 * the link. Floating VEBs are not notified.
4841 */
4842 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4843 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4844 else
4845 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4846
4847 if (pf->vf)
4848 i40e_vc_notify_link_state(pf);
beb0dff1
JK
4849
4850 if (pf->flags & I40E_FLAG_PTP)
4851 i40e_ptp_set_increment(pf);
41c445ff
JB
4852}
4853
4854/**
4855 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4856 * @pf: board private structure
4857 *
4858 * Set the per-queue flags to request a check for stuck queues in the irq
4859 * clean functions, then force interrupts to be sure the irq clean is called.
4860 **/
4861static void i40e_check_hang_subtask(struct i40e_pf *pf)
4862{
4863 int i, v;
4864
4865 /* If we're down or resetting, just bail */
4866 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4867 return;
4868
4869 /* for each VSI/netdev
4870 * for each Tx queue
4871 * set the check flag
4872 * for each q_vector
4873 * force an interrupt
4874 */
4875 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4876 struct i40e_vsi *vsi = pf->vsi[v];
4877 int armed = 0;
4878
4879 if (!pf->vsi[v] ||
4880 test_bit(__I40E_DOWN, &vsi->state) ||
4881 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4882 continue;
4883
4884 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 4885 set_check_for_tx_hang(vsi->tx_rings[i]);
41c445ff 4886 if (test_bit(__I40E_HANG_CHECK_ARMED,
9f65e15b 4887 &vsi->tx_rings[i]->state))
41c445ff
JB
4888 armed++;
4889 }
4890
4891 if (armed) {
4892 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4893 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4894 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4895 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4896 } else {
4897 u16 vec = vsi->base_vector - 1;
4898 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4899 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4900 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4901 wr32(&vsi->back->hw,
4902 I40E_PFINT_DYN_CTLN(vec), val);
4903 }
4904 i40e_flush(&vsi->back->hw);
4905 }
4906 }
4907}
4908
4909/**
4910 * i40e_watchdog_subtask - Check and bring link up
4911 * @pf: board private structure
4912 **/
4913static void i40e_watchdog_subtask(struct i40e_pf *pf)
4914{
4915 int i;
4916
4917 /* if interface is down do nothing */
4918 if (test_bit(__I40E_DOWN, &pf->state) ||
4919 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4920 return;
4921
4922 /* Update the stats for active netdevs so the network stack
4923 * can look at updated numbers whenever it cares to
4924 */
4925 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4926 if (pf->vsi[i] && pf->vsi[i]->netdev)
4927 i40e_update_stats(pf->vsi[i]);
4928
4929 /* Update the stats for the active switching components */
4930 for (i = 0; i < I40E_MAX_VEB; i++)
4931 if (pf->veb[i])
4932 i40e_update_veb_stats(pf->veb[i]);
beb0dff1
JK
4933
4934 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
41c445ff
JB
4935}
4936
4937/**
4938 * i40e_reset_subtask - Set up for resetting the device and driver
4939 * @pf: board private structure
4940 **/
4941static void i40e_reset_subtask(struct i40e_pf *pf)
4942{
4943 u32 reset_flags = 0;
4944
23326186 4945 rtnl_lock();
41c445ff
JB
4946 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4947 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4948 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4949 }
4950 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4951 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4952 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4953 }
4954 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4955 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4956 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4957 }
4958 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4959 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4960 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4961 }
4962
4963 /* If there's a recovery already waiting, it takes
4964 * precedence over starting a new reset sequence.
4965 */
4966 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4967 i40e_handle_reset_warning(pf);
23326186 4968 goto unlock;
41c445ff
JB
4969 }
4970
4971 /* If we're already down or resetting, just bail */
4972 if (reset_flags &&
4973 !test_bit(__I40E_DOWN, &pf->state) &&
4974 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4975 i40e_do_reset(pf, reset_flags);
23326186
ASJ
4976
4977unlock:
4978 rtnl_unlock();
41c445ff
JB
4979}
4980
4981/**
4982 * i40e_handle_link_event - Handle link event
4983 * @pf: board private structure
4984 * @e: event info posted on ARQ
4985 **/
4986static void i40e_handle_link_event(struct i40e_pf *pf,
4987 struct i40e_arq_event_info *e)
4988{
4989 struct i40e_hw *hw = &pf->hw;
4990 struct i40e_aqc_get_link_status *status =
4991 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4992 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4993
4994 /* save off old link status information */
4995 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4996 sizeof(pf->hw.phy.link_info_old));
4997
4998 /* update link status */
4999 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
5000 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
5001 hw_link_info->link_info = status->link_info;
5002 hw_link_info->an_info = status->an_info;
5003 hw_link_info->ext_info = status->ext_info;
5004 hw_link_info->lse_enable =
5005 le16_to_cpu(status->command_flags) &
5006 I40E_AQ_LSE_ENABLE;
5007
5008 /* process the event */
5009 i40e_link_event(pf);
5010
5011 /* Do a new status request to re-enable LSE reporting
5012 * and load new status information into the hw struct,
5013 * then see if the status changed while processing the
5014 * initial event.
5015 */
5016 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
5017 i40e_link_event(pf);
5018}
5019
5020/**
5021 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5022 * @pf: board private structure
5023 **/
5024static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5025{
5026 struct i40e_arq_event_info event;
5027 struct i40e_hw *hw = &pf->hw;
5028 u16 pending, i = 0;
5029 i40e_status ret;
5030 u16 opcode;
5031 u32 val;
5032
5033 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
5034 return;
5035
3197ce22 5036 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
41c445ff
JB
5037 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
5038 if (!event.msg_buf)
5039 return;
5040
5041 do {
2f019123 5042 event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
41c445ff
JB
5043 ret = i40e_clean_arq_element(hw, &event, &pending);
5044 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
5045 dev_info(&pf->pdev->dev, "No ARQ event found\n");
5046 break;
5047 } else if (ret) {
5048 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5049 break;
5050 }
5051
5052 opcode = le16_to_cpu(event.desc.opcode);
5053 switch (opcode) {
5054
5055 case i40e_aqc_opc_get_link_status:
5056 i40e_handle_link_event(pf, &event);
5057 break;
5058 case i40e_aqc_opc_send_msg_to_pf:
5059 ret = i40e_vc_process_vf_msg(pf,
5060 le16_to_cpu(event.desc.retval),
5061 le32_to_cpu(event.desc.cookie_high),
5062 le32_to_cpu(event.desc.cookie_low),
5063 event.msg_buf,
5064 event.msg_size);
5065 break;
5066 case i40e_aqc_opc_lldp_update_mib:
69bfb110 5067 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4e3b35b0
NP
5068#ifdef CONFIG_I40E_DCB
5069 rtnl_lock();
5070 ret = i40e_handle_lldp_event(pf, &event);
5071 rtnl_unlock();
5072#endif /* CONFIG_I40E_DCB */
41c445ff
JB
5073 break;
5074 case i40e_aqc_opc_event_lan_overflow:
69bfb110 5075 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
41c445ff
JB
5076 i40e_handle_lan_overflow_event(pf, &event);
5077 break;
0467bc91
SN
5078 case i40e_aqc_opc_send_msg_to_peer:
5079 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5080 break;
41c445ff
JB
5081 default:
5082 dev_info(&pf->pdev->dev,
0467bc91
SN
5083 "ARQ Error: Unknown event 0x%04x received\n",
5084 opcode);
41c445ff
JB
5085 break;
5086 }
5087 } while (pending && (i++ < pf->adminq_work_limit));
5088
5089 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5090 /* re-enable Admin queue interrupt cause */
5091 val = rd32(hw, I40E_PFINT_ICR0_ENA);
5092 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5093 wr32(hw, I40E_PFINT_ICR0_ENA, val);
5094 i40e_flush(hw);
5095
5096 kfree(event.msg_buf);
5097}
5098
4eb3f768
SN
5099/**
5100 * i40e_verify_eeprom - make sure eeprom is good to use
5101 * @pf: board private structure
5102 **/
5103static void i40e_verify_eeprom(struct i40e_pf *pf)
5104{
5105 int err;
5106
5107 err = i40e_diag_eeprom_test(&pf->hw);
5108 if (err) {
5109 /* retry in case of garbage read */
5110 err = i40e_diag_eeprom_test(&pf->hw);
5111 if (err) {
5112 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5113 err);
5114 set_bit(__I40E_BAD_EEPROM, &pf->state);
5115 }
5116 }
5117
5118 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5119 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5120 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5121 }
5122}
5123
41c445ff
JB
5124/**
5125 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
5126 * @veb: pointer to the VEB instance
5127 *
5128 * This is a recursive function that first builds the attached VSIs then
5129 * recurses in to build the next layer of VEB. We track the connections
5130 * through our own index numbers because the SEIDs from the HW could
5131 * change across the reset.
5132 **/
5133static int i40e_reconstitute_veb(struct i40e_veb *veb)
5134{
5135 struct i40e_vsi *ctl_vsi = NULL;
5136 struct i40e_pf *pf = veb->pf;
5137 int v, veb_idx;
5138 int ret;
5139
5140 /* build VSI that owns this VEB, temporarily attached to base VEB */
5141 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
5142 if (pf->vsi[v] &&
5143 pf->vsi[v]->veb_idx == veb->idx &&
5144 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
5145 ctl_vsi = pf->vsi[v];
5146 break;
5147 }
5148 }
5149 if (!ctl_vsi) {
5150 dev_info(&pf->pdev->dev,
5151 "missing owner VSI for veb_idx %d\n", veb->idx);
5152 ret = -ENOENT;
5153 goto end_reconstitute;
5154 }
5155 if (ctl_vsi != pf->vsi[pf->lan_vsi])
5156 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5157 ret = i40e_add_vsi(ctl_vsi);
5158 if (ret) {
5159 dev_info(&pf->pdev->dev,
5160 "rebuild of owner VSI failed: %d\n", ret);
5161 goto end_reconstitute;
5162 }
5163 i40e_vsi_reset_stats(ctl_vsi);
5164
5165 /* create the VEB in the switch and move the VSI onto the VEB */
5166 ret = i40e_add_veb(veb, ctl_vsi);
5167 if (ret)
5168 goto end_reconstitute;
5169
5170 /* create the remaining VSIs attached to this VEB */
5171 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5172 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5173 continue;
5174
5175 if (pf->vsi[v]->veb_idx == veb->idx) {
5176 struct i40e_vsi *vsi = pf->vsi[v];
5177 vsi->uplink_seid = veb->seid;
5178 ret = i40e_add_vsi(vsi);
5179 if (ret) {
5180 dev_info(&pf->pdev->dev,
5181 "rebuild of vsi_idx %d failed: %d\n",
5182 v, ret);
5183 goto end_reconstitute;
5184 }
5185 i40e_vsi_reset_stats(vsi);
5186 }
5187 }
5188
5189 /* create any VEBs attached to this VEB - RECURSION */
5190 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
5191 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
5192 pf->veb[veb_idx]->uplink_seid = veb->seid;
5193 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
5194 if (ret)
5195 break;
5196 }
5197 }
5198
5199end_reconstitute:
5200 return ret;
5201}
5202
5203/**
5204 * i40e_get_capabilities - get info about the HW
5205 * @pf: the PF struct
5206 **/
5207static int i40e_get_capabilities(struct i40e_pf *pf)
5208{
5209 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
5210 u16 data_size;
5211 int buf_len;
5212 int err;
5213
5214 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
5215 do {
5216 cap_buf = kzalloc(buf_len, GFP_KERNEL);
5217 if (!cap_buf)
5218 return -ENOMEM;
5219
5220 /* this loads the data into the hw struct for us */
5221 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
5222 &data_size,
5223 i40e_aqc_opc_list_func_capabilities,
5224 NULL);
5225 /* data loaded, buffer no longer needed */
5226 kfree(cap_buf);
5227
5228 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
5229 /* retry with a larger buffer */
5230 buf_len = data_size;
5231 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
5232 dev_info(&pf->pdev->dev,
5233 "capability discovery failed: aq=%d\n",
5234 pf->hw.aq.asq_last_status);
5235 return -ENODEV;
5236 }
5237 } while (err);
5238
ac71b7ba
ASJ
5239 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5240 (pf->hw.aq.fw_maj_ver < 2)) {
5241 pf->hw.func_caps.num_msix_vectors++;
5242 pf->hw.func_caps.num_msix_vectors_vf++;
5243 }
5244
41c445ff
JB
5245 if (pf->hw.debug_mask & I40E_DEBUG_USER)
5246 dev_info(&pf->pdev->dev,
5247 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
5248 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
5249 pf->hw.func_caps.num_msix_vectors,
5250 pf->hw.func_caps.num_msix_vectors_vf,
5251 pf->hw.func_caps.fd_filters_guaranteed,
5252 pf->hw.func_caps.fd_filters_best_effort,
5253 pf->hw.func_caps.num_tx_qp,
5254 pf->hw.func_caps.num_vsis);
5255
7134f9ce
JB
5256#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
5257 + pf->hw.func_caps.num_vfs)
5258 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
5259 dev_info(&pf->pdev->dev,
5260 "got num_vsis %d, setting num_vsis to %d\n",
5261 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
5262 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
5263 }
5264
41c445ff
JB
5265 return 0;
5266}
5267
cbf61325
ASJ
5268static int i40e_vsi_clear(struct i40e_vsi *vsi);
5269
41c445ff 5270/**
cbf61325 5271 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
41c445ff
JB
5272 * @pf: board private structure
5273 **/
cbf61325 5274static void i40e_fdir_sb_setup(struct i40e_pf *pf)
41c445ff
JB
5275{
5276 struct i40e_vsi *vsi;
8a9eb7d3 5277 int i;
41c445ff 5278
cbf61325 5279 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
41c445ff
JB
5280 return;
5281
cbf61325 5282 /* find existing VSI and see if it needs configuring */
41c445ff 5283 vsi = NULL;
cbf61325
ASJ
5284 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5285 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
41c445ff 5286 vsi = pf->vsi[i];
cbf61325
ASJ
5287 break;
5288 }
5289 }
5290
5291 /* create a new VSI if none exists */
41c445ff 5292 if (!vsi) {
cbf61325
ASJ
5293 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
5294 pf->vsi[pf->lan_vsi]->seid, 0);
41c445ff
JB
5295 if (!vsi) {
5296 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
8a9eb7d3
SN
5297 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5298 return;
41c445ff 5299 }
41c445ff 5300 }
cbf61325 5301
8a9eb7d3 5302 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
41c445ff
JB
5303}
5304
5305/**
5306 * i40e_fdir_teardown - release the Flow Director resources
5307 * @pf: board private structure
5308 **/
5309static void i40e_fdir_teardown(struct i40e_pf *pf)
5310{
5311 int i;
5312
17a73f6b 5313 i40e_fdir_filter_exit(pf);
41c445ff
JB
5314 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5315 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5316 i40e_vsi_release(pf->vsi[i]);
5317 break;
5318 }
5319 }
5320}
5321
5322/**
f650a38b 5323 * i40e_prep_for_reset - prep for the core to reset
41c445ff
JB
5324 * @pf: board private structure
5325 *
f650a38b
ASJ
5326 * Close up the VFs and other things in prep for a PF reset.
5327 **/
5328static int i40e_prep_for_reset(struct i40e_pf *pf)
41c445ff 5329{
41c445ff
JB
5330 struct i40e_hw *hw = &pf->hw;
5331 i40e_status ret;
5332 u32 v;
5333
5334 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
5335 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
f650a38b 5336 return 0;
41c445ff 5337
69bfb110 5338 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
41c445ff 5339
37f0be6d
ASJ
5340 if (i40e_check_asq_alive(hw))
5341 i40e_vc_notify_reset(pf);
41c445ff
JB
5342
5343 /* quiesce the VSIs and their queues that are not already DOWN */
5344 i40e_pf_quiesce_all_vsi(pf);
5345
5346 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
5347 if (pf->vsi[v])
5348 pf->vsi[v]->seid = 0;
5349 }
5350
5351 i40e_shutdown_adminq(&pf->hw);
5352
f650a38b
ASJ
5353 /* call shutdown HMC */
5354 ret = i40e_shutdown_lan_hmc(hw);
5355 if (ret) {
5356 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
5357 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5358 }
5359 return ret;
5360}
5361
5362/**
4dda12e6 5363 * i40e_reset_and_rebuild - reset and rebuild using a saved config
f650a38b 5364 * @pf: board private structure
bc7d338f 5365 * @reinit: if the Main VSI needs to re-initialized.
f650a38b 5366 **/
bc7d338f 5367static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
f650a38b
ASJ
5368{
5369 struct i40e_driver_version dv;
5370 struct i40e_hw *hw = &pf->hw;
5371 i40e_status ret;
5372 u32 v;
5373
41c445ff
JB
5374 /* Now we wait for GRST to settle out.
5375 * We don't have to delete the VEBs or VSIs from the hw switch
5376 * because the reset will make them disappear.
5377 */
5378 ret = i40e_pf_reset(hw);
5379 if (ret)
5380 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5381 pf->pfr_count++;
5382
5383 if (test_bit(__I40E_DOWN, &pf->state))
5384 goto end_core_reset;
69bfb110 5385 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
41c445ff
JB
5386
5387 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5388 ret = i40e_init_adminq(&pf->hw);
5389 if (ret) {
5390 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5391 goto end_core_reset;
5392 }
5393
4eb3f768
SN
5394 /* re-verify the eeprom if we just had an EMP reset */
5395 if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
5396 clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
5397 i40e_verify_eeprom(pf);
5398 }
5399
41c445ff
JB
5400 ret = i40e_get_capabilities(pf);
5401 if (ret) {
5402 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
5403 ret);
5404 goto end_core_reset;
5405 }
5406
41c445ff
JB
5407 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
5408 hw->func_caps.num_rx_qp,
5409 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
5410 if (ret) {
5411 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
5412 goto end_core_reset;
5413 }
5414 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
5415 if (ret) {
5416 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
5417 goto end_core_reset;
5418 }
5419
4e3b35b0
NP
5420#ifdef CONFIG_I40E_DCB
5421 ret = i40e_init_pf_dcb(pf);
5422 if (ret) {
5423 dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
5424 goto end_core_reset;
5425 }
5426#endif /* CONFIG_I40E_DCB */
5427
41c445ff 5428 /* do basic switch setup */
bc7d338f 5429 ret = i40e_setup_pf_switch(pf, reinit);
41c445ff
JB
5430 if (ret)
5431 goto end_core_reset;
5432
5433 /* Rebuild the VSIs and VEBs that existed before reset.
5434 * They are still in our local switch element arrays, so only
5435 * need to rebuild the switch model in the HW.
5436 *
5437 * If there were VEBs but the reconstitution failed, we'll try
 5438 * to recover minimal use by getting the basic PF VSI working.
5439 */
5440 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
69bfb110 5441 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
5442 /* find the one VEB connected to the MAC, and find orphans */
5443 for (v = 0; v < I40E_MAX_VEB; v++) {
5444 if (!pf->veb[v])
5445 continue;
5446
5447 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
5448 pf->veb[v]->uplink_seid == 0) {
5449 ret = i40e_reconstitute_veb(pf->veb[v]);
5450
5451 if (!ret)
5452 continue;
5453
5454 /* If Main VEB failed, we're in deep doodoo,
5455 * so give up rebuilding the switch and set up
5456 * for minimal rebuild of PF VSI.
5457 * If orphan failed, we'll report the error
5458 * but try to keep going.
5459 */
5460 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
5461 dev_info(&pf->pdev->dev,
5462 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
5463 ret);
5464 pf->vsi[pf->lan_vsi]->uplink_seid
5465 = pf->mac_seid;
5466 break;
5467 } else if (pf->veb[v]->uplink_seid == 0) {
5468 dev_info(&pf->pdev->dev,
5469 "rebuild of orphan VEB failed: %d\n",
5470 ret);
5471 }
5472 }
5473 }
5474 }
5475
5476 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
5477 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
5478 /* no VEB, so rebuild only the Main VSI */
5479 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
5480 if (ret) {
5481 dev_info(&pf->pdev->dev,
5482 "rebuild of Main VSI failed: %d\n", ret);
5483 goto end_core_reset;
5484 }
5485 }
5486
5487 /* reinit the misc interrupt */
5488 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5489 ret = i40e_setup_misc_vector(pf);
5490
5491 /* restart the VSIs that were rebuilt and running before the reset */
5492 i40e_pf_unquiesce_all_vsi(pf);
5493
5494 if (pf->num_alloc_vfs) {
5495 for (v = 0; v < pf->num_alloc_vfs; v++)
5496 i40e_reset_vf(&pf->vf[v], true);
5497 }
5498
5499 /* tell the firmware that we're starting */
5500 dv.major_version = DRV_VERSION_MAJOR;
5501 dv.minor_version = DRV_VERSION_MINOR;
5502 dv.build_version = DRV_VERSION_BUILD;
5503 dv.subbuild_version = 0;
5504 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5505
69bfb110 5506 dev_info(&pf->pdev->dev, "reset complete\n");
5507
5508end_core_reset:
5509 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5510}
5511
5512/**
 5513 * i40e_handle_reset_warning - prep for the PF to reset, then reset and rebuild
5514 * @pf: board private structure
5515 *
5516 * Close up the VFs and other things in prep for a Core Reset,
5517 * then get ready to rebuild the world.
5518 **/
5519static void i40e_handle_reset_warning(struct i40e_pf *pf)
5520{
5521 i40e_status ret;
5522
5523 ret = i40e_prep_for_reset(pf);
5524 if (!ret)
bc7d338f 5525 i40e_reset_and_rebuild(pf, false);
5526}
5527
5528/**
5529 * i40e_handle_mdd_event
5530 * @pf: pointer to the pf structure
5531 *
5532 * Called from the MDD irq handler to identify possibly malicious vfs
5533 **/
5534static void i40e_handle_mdd_event(struct i40e_pf *pf)
5535{
5536 struct i40e_hw *hw = &pf->hw;
5537 bool mdd_detected = false;
5538 struct i40e_vf *vf;
5539 u32 reg;
5540 int i;
5541
5542 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
5543 return;
5544
5545 /* find what triggered the MDD event */
5546 reg = rd32(hw, I40E_GL_MDET_TX);
5547 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
5548 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
5549 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
 5550 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
5551 >> I40E_GL_MDET_TX_EVENT_SHIFT;
5552 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5553 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5554 dev_info(&pf->pdev->dev,
f29eaa3d 5555 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
5556 event, queue, func);
5557 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5558 mdd_detected = true;
5559 }
5560 reg = rd32(hw, I40E_GL_MDET_RX);
5561 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
5562 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
5563 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
 5564 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
5565 >> I40E_GL_MDET_RX_EVENT_SHIFT;
5566 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5567 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5568 dev_info(&pf->pdev->dev,
f29eaa3d 5569 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
5570 event, queue, func);
5571 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5572 mdd_detected = true;
5573 }
5574
5575 /* see if one of the VFs needs its hand slapped */
5576 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
5577 vf = &(pf->vf[i]);
5578 reg = rd32(hw, I40E_VP_MDET_TX(i));
5579 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5580 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5581 vf->num_mdd_events++;
5582 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5583 }
5584
5585 reg = rd32(hw, I40E_VP_MDET_RX(i));
5586 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5587 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5588 vf->num_mdd_events++;
5589 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5590 }
5591
5592 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5593 dev_info(&pf->pdev->dev,
5594 "Too many MDD events on VF %d, disabled\n", i);
5595 dev_info(&pf->pdev->dev,
5596 "Use PF Control I/F to re-enable the VF\n");
5597 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5598 }
5599 }
5600
5601 /* re-enable mdd interrupt cause */
5602 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5603 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5604 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5605 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5606 i40e_flush(hw);
5607}
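
/* Editor's sketch (illustrative, not driver code): the MDD decode above is
 * the usual mask-then-shift idiom for packed register fields, and writing
 * all-ones back clears the latched event.  With hypothetical FIELD_MASK/
 * FIELD_SHIFT macros standing in for the I40E_GL_MDET_* names:
 *
 *	u32 reg = rd32(hw, SOME_REG);                  // assumed register
 *	u8 field = (reg & FIELD_MASK) >> FIELD_SHIFT;  // extract the field
 *	wr32(hw, SOME_REG, 0xffffffff);                // write-1-to-clear
 */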
5608
5609#ifdef CONFIG_I40E_VXLAN
5610/**
5611 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
5612 * @pf: board private structure
5613 **/
5614static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5615{
5616 const int vxlan_hdr_qwords = 4;
5617 struct i40e_hw *hw = &pf->hw;
5618 i40e_status ret;
5619 u8 filter_index;
5620 __be16 port;
5621 int i;
5622
5623 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5624 return;
5625
5626 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5627
5628 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5629 if (pf->pending_vxlan_bitmap & (1 << i)) {
5630 pf->pending_vxlan_bitmap &= ~(1 << i);
5631 port = pf->vxlan_ports[i];
5632 ret = port ?
5633 i40e_aq_add_udp_tunnel(hw, ntohs(port),
5634 vxlan_hdr_qwords,
5635 I40E_AQC_TUNNEL_TYPE_VXLAN,
5636 &filter_index, NULL)
5637 : i40e_aq_del_udp_tunnel(hw, i, NULL);
5638
5639 if (ret) {
5640 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5641 port ? "adding" : "deleting",
 5642 ntohs(port), i);
5643
5644 pf->vxlan_ports[i] = 0;
5645 } else {
 5646 dev_info(&pf->pdev->dev, "%s port %d with AQ command, filter index %d\n",
5647 port ? "Added" : "Deleted",
5648 ntohs(port), port ? i : filter_index);
5649 }
5650 }
5651 }
5652}
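
/* Editor's note (illustrative): producers of this work only mark state; the
 * ndo add/del callbacks later in this file queue a port change roughly as
 *
 *	pf->vxlan_ports[idx] = port;            // or 0 to request deletion
 *	pf->pending_vxlan_bitmap |= (1 << idx);
 *	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 *
 * and the service task drains the bitmap here via admin queue commands.
 */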
5653
5654#endif
5655/**
5656 * i40e_service_task - Run the driver's async subtasks
5657 * @work: pointer to work_struct containing our data
5658 **/
5659static void i40e_service_task(struct work_struct *work)
5660{
5661 struct i40e_pf *pf = container_of(work,
5662 struct i40e_pf,
5663 service_task);
5664 unsigned long start_time = jiffies;
5665
5666 i40e_reset_subtask(pf);
5667 i40e_handle_mdd_event(pf);
5668 i40e_vc_process_vflr_event(pf);
5669 i40e_watchdog_subtask(pf);
5670 i40e_fdir_reinit_subtask(pf);
5671 i40e_check_hang_subtask(pf);
5672 i40e_sync_filters_subtask(pf);
5673#ifdef CONFIG_I40E_VXLAN
5674 i40e_sync_vxlan_filters_subtask(pf);
5675#endif
5676 i40e_clean_adminq_subtask(pf);
5677
5678 i40e_service_event_complete(pf);
5679
5680 /* If the tasks have taken longer than one timer cycle or there
5681 * is more work to be done, reschedule the service task now
5682 * rather than wait for the timer to tick again.
5683 */
5684 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5685 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
5686 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
5687 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5688 i40e_service_event_schedule(pf);
5689}
5690
5691/**
5692 * i40e_service_timer - timer callback
5693 * @data: pointer to PF struct
5694 **/
5695static void i40e_service_timer(unsigned long data)
5696{
5697 struct i40e_pf *pf = (struct i40e_pf *)data;
5698
5699 mod_timer(&pf->service_timer,
5700 round_jiffies(jiffies + pf->service_timer_period));
5701 i40e_service_event_schedule(pf);
5702}
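
/* Editor's sketch of the assumed probe-time wiring (not shown in this hunk),
 * matching the kernel timer/work APIs of this era:
 *
 *	INIT_WORK(&pf->service_task, i40e_service_task);
 *	setup_timer(&pf->service_timer, i40e_service_timer,
 *		    (unsigned long)pf);
 *	mod_timer(&pf->service_timer,
 *		  round_jiffies(jiffies + pf->service_timer_period));
 *
 * Each tick reschedules itself and queues the work item, so the subtasks
 * above run in process context rather than in the timer interrupt.
 */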
5703
5704/**
5705 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5706 * @vsi: the VSI being configured
5707 **/
5708static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5709{
5710 struct i40e_pf *pf = vsi->back;
5711
5712 switch (vsi->type) {
5713 case I40E_VSI_MAIN:
5714 vsi->alloc_queue_pairs = pf->num_lan_qps;
5715 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5716 I40E_REQ_DESCRIPTOR_MULTIPLE);
5717 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5718 vsi->num_q_vectors = pf->num_lan_msix;
5719 else
5720 vsi->num_q_vectors = 1;
5721
5722 break;
5723
5724 case I40E_VSI_FDIR:
5725 vsi->alloc_queue_pairs = 1;
5726 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5727 I40E_REQ_DESCRIPTOR_MULTIPLE);
5728 vsi->num_q_vectors = 1;
5729 break;
5730
5731 case I40E_VSI_VMDQ2:
5732 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5733 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5734 I40E_REQ_DESCRIPTOR_MULTIPLE);
5735 vsi->num_q_vectors = pf->num_vmdq_msix;
5736 break;
5737
5738 case I40E_VSI_SRIOV:
5739 vsi->alloc_queue_pairs = pf->num_vf_qps;
5740 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5741 I40E_REQ_DESCRIPTOR_MULTIPLE);
5742 break;
5743
5744 default:
5745 WARN_ON(1);
5746 return -ENODATA;
5747 }
5748
5749 return 0;
5750}
5751
5752/**
5753 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 5754 * @vsi: VSI pointer
bc7d338f 5755 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
5756 *
5757 * On error: returns error code (negative)
5758 * On success: returns 0
5759 **/
bc7d338f 5760static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
5761{
5762 int size;
5763 int ret = 0;
5764
ac6c5e3d 5765 /* allocate memory for both Tx and Rx ring pointers */
5766 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5767 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5768 if (!vsi->tx_rings)
5769 return -ENOMEM;
5770 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5771
5772 if (alloc_qvectors) {
5773 /* allocate memory for q_vector pointers */
5774 size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5775 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5776 if (!vsi->q_vectors) {
5777 ret = -ENOMEM;
5778 goto err_vectors;
5779 }
5780 }
5781 return ret;
5782
5783err_vectors:
5784 kfree(vsi->tx_rings);
5785 return ret;
5786}
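
/* Editor's note: a single allocation backs both pointer arrays; the layout is
 *
 *	vsi->tx_rings: [ tx 0 .. tx n-1 ][ rx 0 .. rx n-1 ]
 *	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
 *
 * which is why i40e_vsi_free_arrays() below frees only tx_rings and simply
 * NULLs the rx_rings alias.
 */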
5787
5788/**
5789 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5790 * @pf: board private structure
5791 * @type: type of VSI
5792 *
5793 * On error: returns error code (negative)
5794 * On success: returns vsi index in PF (positive)
5795 **/
5796static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5797{
5798 int ret = -ENODEV;
5799 struct i40e_vsi *vsi;
5800 int vsi_idx;
5801 int i;
5802
5803 /* Need to protect the allocation of the VSIs at the PF level */
5804 mutex_lock(&pf->switch_mutex);
5805
5806 /* VSI list may be fragmented if VSI creation/destruction has
5807 * been happening. We can afford to do a quick scan to look
5808 * for any free VSIs in the list.
5809 *
5810 * find next empty vsi slot, looping back around if necessary
5811 */
5812 i = pf->next_vsi;
5813 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5814 i++;
5815 if (i >= pf->hw.func_caps.num_vsis) {
5816 i = 0;
5817 while (i < pf->next_vsi && pf->vsi[i])
5818 i++;
5819 }
5820
5821 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5822 vsi_idx = i; /* Found one! */
5823 } else {
5824 ret = -ENODEV;
493fb300 5825 goto unlock_pf; /* out of VSI slots! */
5826 }
5827 pf->next_vsi = ++i;
5828
5829 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5830 if (!vsi) {
5831 ret = -ENOMEM;
493fb300 5832 goto unlock_pf;
5833 }
5834 vsi->type = type;
5835 vsi->back = pf;
5836 set_bit(__I40E_DOWN, &vsi->state);
5837 vsi->flags = 0;
5838 vsi->idx = vsi_idx;
5839 vsi->rx_itr_setting = pf->rx_itr_default;
5840 vsi->tx_itr_setting = pf->tx_itr_default;
5841 vsi->netdev_registered = false;
5842 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5843 INIT_LIST_HEAD(&vsi->mac_filter_list);
5844
5845 ret = i40e_set_num_rings_in_vsi(vsi);
5846 if (ret)
5847 goto err_rings;
5848
bc7d338f 5849 ret = i40e_vsi_alloc_arrays(vsi, true);
f650a38b 5850 if (ret)
9f65e15b 5851 goto err_rings;
493fb300 5852
5853 /* Setup default MSIX irq handler for VSI */
5854 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5855
5856 pf->vsi[vsi_idx] = vsi;
5857 ret = vsi_idx;
5858 goto unlock_pf;
5859
9f65e15b 5860err_rings:
5861 pf->next_vsi = i - 1;
5862 kfree(vsi);
5863unlock_pf:
5864 mutex_unlock(&pf->switch_mutex);
5865 return ret;
5866}
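
/* Editor's sketch of a typical caller (cf. i40e_vsi_setup() later in this
 * file); note the positive return value is a slot index, not a pointer:
 *
 *	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_VMDQ2);
 *	if (v_idx < 0)
 *		goto err_alloc;
 *	vsi = pf->vsi[v_idx];
 */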
5867
5868/**
5869 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 5870 * @vsi: VSI pointer
bc7d338f 5871 * @free_qvectors: a bool to specify if q_vectors need to be freed.
5875 **/
bc7d338f 5876static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
5877{
5878 /* free the ring and vector containers */
5879 if (free_qvectors) {
5880 kfree(vsi->q_vectors);
5881 vsi->q_vectors = NULL;
5882 }
5883 kfree(vsi->tx_rings);
5884 vsi->tx_rings = NULL;
5885 vsi->rx_rings = NULL;
5886}
5887
5888/**
5889 * i40e_vsi_clear - Deallocate the VSI provided
5890 * @vsi: the VSI being un-configured
5891 **/
5892static int i40e_vsi_clear(struct i40e_vsi *vsi)
5893{
5894 struct i40e_pf *pf;
5895
5896 if (!vsi)
5897 return 0;
5898
5899 if (!vsi->back)
5900 goto free_vsi;
5901 pf = vsi->back;
5902
5903 mutex_lock(&pf->switch_mutex);
5904 if (!pf->vsi[vsi->idx]) {
5905 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5906 vsi->idx, vsi->idx, vsi, vsi->type);
5907 goto unlock_vsi;
5908 }
5909
5910 if (pf->vsi[vsi->idx] != vsi) {
5911 dev_err(&pf->pdev->dev,
5912 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5913 pf->vsi[vsi->idx]->idx,
5914 pf->vsi[vsi->idx],
5915 pf->vsi[vsi->idx]->type,
5916 vsi->idx, vsi, vsi->type);
5917 goto unlock_vsi;
5918 }
5919
5920 /* updates the pf for this cleared vsi */
5921 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5922 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5923
bc7d338f 5924 i40e_vsi_free_arrays(vsi, true);
493fb300 5925
5926 pf->vsi[vsi->idx] = NULL;
5927 if (vsi->idx < pf->next_vsi)
5928 pf->next_vsi = vsi->idx;
5929
5930unlock_vsi:
5931 mutex_unlock(&pf->switch_mutex);
5932free_vsi:
5933 kfree(vsi);
5934
5935 return 0;
5936}
5937
5938/**
5939 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5940 * @vsi: the VSI being cleaned
5941 **/
be1d5eea 5942static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5943{
5944 int i;
5945
8e9dca53 5946 if (vsi->tx_rings && vsi->tx_rings[0]) {
d7397644 5947 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5948 kfree_rcu(vsi->tx_rings[i], rcu);
5949 vsi->tx_rings[i] = NULL;
5950 vsi->rx_rings[i] = NULL;
5951 }
be1d5eea 5952 }
5953}
5954
5955/**
5956 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5957 * @vsi: the VSI being configured
5958 **/
5959static int i40e_alloc_rings(struct i40e_vsi *vsi)
5960{
5961 struct i40e_pf *pf = vsi->back;
5962 int i;
5963
41c445ff 5964 /* Set basic values in the rings to be used later during open() */
d7397644 5965 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5966 struct i40e_ring *tx_ring;
5967 struct i40e_ring *rx_ring;
5968
ac6c5e3d 5969 /* allocate space for both Tx and Rx in one shot */
5970 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5971 if (!tx_ring)
5972 goto err_out;
5973
5974 tx_ring->queue_index = i;
5975 tx_ring->reg_idx = vsi->base_queue + i;
5976 tx_ring->ring_active = false;
5977 tx_ring->vsi = vsi;
5978 tx_ring->netdev = vsi->netdev;
5979 tx_ring->dev = &pf->pdev->dev;
5980 tx_ring->count = vsi->num_desc;
5981 tx_ring->size = 0;
5982 tx_ring->dcb_tc = 0;
9f65e15b 5983 vsi->tx_rings[i] = tx_ring;
41c445ff 5984
9f65e15b 5985 rx_ring = &tx_ring[1];
5986 rx_ring->queue_index = i;
5987 rx_ring->reg_idx = vsi->base_queue + i;
5988 rx_ring->ring_active = false;
5989 rx_ring->vsi = vsi;
5990 rx_ring->netdev = vsi->netdev;
5991 rx_ring->dev = &pf->pdev->dev;
5992 rx_ring->count = vsi->num_desc;
5993 rx_ring->size = 0;
5994 rx_ring->dcb_tc = 0;
5995 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5996 set_ring_16byte_desc_enabled(rx_ring);
5997 else
5998 clear_ring_16byte_desc_enabled(rx_ring);
9f65e15b 5999 vsi->rx_rings[i] = rx_ring;
6000 }
6001
6002 return 0;
6003
6004err_out:
6005 i40e_vsi_clear_rings(vsi);
6006 return -ENOMEM;
6007}
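
/* Editor's note: each loop iteration above kzalloc()s one block holding two
 * i40e_ring structs; rx_ring = &tx_ring[1] is simply the second struct in
 * that block.  This is why i40e_vsi_clear_rings() frees only tx_rings[i]
 * (via kfree_rcu) and NULLs the rx_rings[i] alias without a second kfree.
 */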
6008
6009/**
6010 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6011 * @pf: board private structure
6012 * @vectors: the number of MSI-X vectors to request
6013 *
6014 * Returns the number of vectors reserved, or error
6015 **/
6016static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6017{
6018 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6019 I40E_MIN_MSIX, vectors);
6020 if (vectors < 0) {
41c445ff 6021 dev_info(&pf->pdev->dev,
7b37f376 6022 "MSI-X vector reservation failed: %d\n", vectors);
6023 vectors = 0;
6024 }
6025
6026 pf->num_msix_entries = vectors;
6027
6028 return vectors;
6029}
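
/* Editor's note: pci_enable_msix_range() returns the number of vectors
 * actually granted (between I40E_MIN_MSIX and the request) or a negative
 * errno, which this wrapper folds to 0.  A sketch of the caller-side
 * contract used by i40e_init_msix() below:
 *
 *	vec = i40e_reserve_msix_vectors(pf, v_budget);
 *	if (vec < I40E_MIN_MSIX)        // 0 here means reservation failed
 *		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
 */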
6030
6031/**
6032 * i40e_init_msix - Setup the MSIX capability
6033 * @pf: board private structure
6034 *
6035 * Work with the OS to set up the MSIX vectors needed.
6036 *
6037 * Returns 0 on success, negative on failure
6038 **/
6039static int i40e_init_msix(struct i40e_pf *pf)
6040{
6041 i40e_status err = 0;
6042 struct i40e_hw *hw = &pf->hw;
6043 int v_budget, i;
6044 int vec;
6045
6046 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6047 return -ENODEV;
6048
6049 /* The number of vectors we'll request will be comprised of:
6050 * - Add 1 for "other" cause for Admin Queue events, etc.
6051 * - The number of LAN queue pairs
6052 * - Queues being used for RSS.
 6053 * We don't need as many as max_rss_size vectors;
 6054 * use rss_size instead in the calculation, since that
 6055 * is governed by the number of CPUs in the system.
6056 * - assumes symmetric Tx/Rx pairing
6057 * - The number of VMDq pairs
6058 * Once we count this up, try the request.
6059 *
6060 * If we can't get what we want, we'll simplify to nearly nothing
6061 * and try again. If that still fails, we punt.
6062 */
f8ff1464 6063 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
6064 pf->num_vmdq_msix = pf->num_vmdq_qps;
6065 v_budget = 1 + pf->num_lan_msix;
6066 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
60ea5f83 6067 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
6068 v_budget++;
6069
6070 /* Scale down if necessary, and the rings will share vectors */
6071 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
6072
6073 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6074 GFP_KERNEL);
6075 if (!pf->msix_entries)
6076 return -ENOMEM;
6077
6078 for (i = 0; i < v_budget; i++)
6079 pf->msix_entries[i].entry = i;
6080 vec = i40e_reserve_msix_vectors(pf, v_budget);
6081 if (vec < I40E_MIN_MSIX) {
6082 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6083 kfree(pf->msix_entries);
6084 pf->msix_entries = NULL;
6085 return -ENODEV;
6086
6087 } else if (vec == I40E_MIN_MSIX) {
6088 /* Adjust for minimal MSIX use */
77fa28be 6089 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
6090 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
6091 pf->num_vmdq_vsis = 0;
6092 pf->num_vmdq_qps = 0;
6093 pf->num_vmdq_msix = 0;
6094 pf->num_lan_qps = 1;
6095 pf->num_lan_msix = 1;
6096
6097 } else if (vec != v_budget) {
6098 /* Scale vector usage down */
6099 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
6100 vec--; /* reserve the misc vector */
6101
6102 /* partition out the remaining vectors */
6103 switch (vec) {
6104 case 2:
6105 pf->num_vmdq_vsis = 1;
6106 pf->num_lan_msix = 1;
6107 break;
6108 case 3:
6109 pf->num_vmdq_vsis = 1;
6110 pf->num_lan_msix = 2;
6111 break;
6112 default:
6113 pf->num_lan_msix = min_t(int, (vec / 2),
6114 pf->num_lan_qps);
6115 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
6116 I40E_DEFAULT_NUM_VMDQ_VSI);
6117 break;
6118 }
6119 }
6120
6121 return err;
6122}
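
/* Editor's worked example of the budget above (all values assumed): with
 * 8 LAN queue pairs, rss_size == rss_size_max, 8 VMDq VSIs of 4 queue
 * pairs each, and FD sideband enabled:
 *
 *	v_budget = 1 + 8 + (8 * 4) + 1 = 42
 *
 * then capped at hw->func_caps.num_msix_vectors before the reservation.
 */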
6123
493fb300 6124/**
90e04070 6125 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
6126 * @vsi: the VSI being configured
6127 * @v_idx: index of the vector in the vsi struct
6128 *
6129 * We allocate one q_vector. If allocation fails we return -ENOMEM.
6130 **/
90e04070 6131static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
6132{
6133 struct i40e_q_vector *q_vector;
6134
6135 /* allocate q_vector */
6136 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
6137 if (!q_vector)
6138 return -ENOMEM;
6139
6140 q_vector->vsi = vsi;
6141 q_vector->v_idx = v_idx;
6142 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
6143 if (vsi->netdev)
6144 netif_napi_add(vsi->netdev, &q_vector->napi,
6145 i40e_napi_poll, vsi->work_limit);
6146
6147 q_vector->rx.latency_range = I40E_LOW_LATENCY;
6148 q_vector->tx.latency_range = I40E_LOW_LATENCY;
6149
6150 /* tie q_vector and vsi together */
6151 vsi->q_vectors[v_idx] = q_vector;
6152
6153 return 0;
6154}
6155
41c445ff 6156/**
90e04070 6157 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
6158 * @vsi: the VSI being configured
6159 *
6160 * We allocate one q_vector per queue interrupt. If allocation fails we
6161 * return -ENOMEM.
6162 **/
90e04070 6163static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
6164{
6165 struct i40e_pf *pf = vsi->back;
6166 int v_idx, num_q_vectors;
493fb300 6167 int err;
6168
6169 /* if not MSIX, give the one vector only to the LAN VSI */
6170 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6171 num_q_vectors = vsi->num_q_vectors;
6172 else if (vsi == pf->vsi[pf->lan_vsi])
6173 num_q_vectors = 1;
6174 else
6175 return -EINVAL;
6176
41c445ff 6177 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
90e04070 6178 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
6179 if (err)
6180 goto err_out;
6181 }
6182
6183 return 0;
6184
6185err_out:
6186 while (v_idx--)
6187 i40e_free_q_vector(vsi, v_idx);
6188
6189 return err;
6190}
6191
6192/**
6193 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
6194 * @pf: board private structure to initialize
6195 **/
6196static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6197{
6198 int err = 0;
6199
6200 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
6201 err = i40e_init_msix(pf);
6202 if (err) {
6203 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
6204 I40E_FLAG_RSS_ENABLED |
6205 I40E_FLAG_DCB_ENABLED |
6206 I40E_FLAG_SRIOV_ENABLED |
6207 I40E_FLAG_FD_SB_ENABLED |
6208 I40E_FLAG_FD_ATR_ENABLED |
6209 I40E_FLAG_VMDQ_ENABLED);
6210
6211 /* rework the queue expectations without MSIX */
6212 i40e_determine_queue_usage(pf);
6213 }
6214 }
6215
6216 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6217 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
77fa28be 6218 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
6219 err = pci_enable_msi(pf->pdev);
6220 if (err) {
958a3e3b 6221 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
6222 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
6223 }
6224 }
6225
958a3e3b 6226 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
77fa28be 6227 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
958a3e3b 6228
6229 /* track first vector for misc interrupts */
6230 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
6231}
6232
6233/**
6234 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
6235 * @pf: board private structure
6236 *
6237 * This sets up the handler for MSIX 0, which is used to manage the
6238 * non-queue interrupts, e.g. AdminQ and errors. This is not used
6239 * when in MSI or Legacy interrupt mode.
6240 **/
6241static int i40e_setup_misc_vector(struct i40e_pf *pf)
6242{
6243 struct i40e_hw *hw = &pf->hw;
6244 int err = 0;
6245
6246 /* Only request the irq if this is the first time through, and
6247 * not when we're rebuilding after a Reset
6248 */
6249 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6250 err = request_irq(pf->msix_entries[0].vector,
6251 i40e_intr, 0, pf->misc_int_name, pf);
6252 if (err) {
6253 dev_info(&pf->pdev->dev,
6254 "request_irq for %s failed: %d\n",
6255 pf->misc_int_name, err);
6256 return -EFAULT;
6257 }
6258 }
6259
6260 i40e_enable_misc_int_causes(hw);
6261
6262 /* associate no queues to the misc vector */
6263 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
6264 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
6265
6266 i40e_flush(hw);
6267
6268 i40e_irq_dynamic_enable_icr0(pf);
6269
6270 return err;
6271}
6272
6273/**
6274 * i40e_config_rss - Prepare for RSS if used
6275 * @pf: board private structure
6276 **/
6277static int i40e_config_rss(struct i40e_pf *pf)
6278{
6279 /* Set of random keys generated using kernel random number generator */
6280 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
6281 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
6282 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
6283 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
6284 struct i40e_hw *hw = &pf->hw;
6285 u32 lut = 0;
6286 int i, j;
6287 u64 hena;
6288
6289 /* Fill out hash function seed */
6290 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6291 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
6292
6293 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
6294 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
6295 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
12dc4fe3 6296 hena |= I40E_DEFAULT_RSS_HENA;
6297 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
6298 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
6299
6300 /* Populate the LUT with max no. of queues in round robin fashion */
6301 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
6302
6303 /* The assumption is that lan qp count will be the highest
6304 * qp count for any PF VSI that needs RSS.
6305 * If multiple VSIs need RSS support, all the qp counts
6306 * for those VSIs should be a power of 2 for RSS to work.
6307 * If LAN VSI is the only consumer for RSS then this requirement
6308 * is not necessary.
6309 */
6310 if (j == pf->rss_size)
6311 j = 0;
6312 /* lut = 4-byte sliding window of 4 lut entries */
6313 lut = (lut << 8) | (j &
6314 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
6315 /* On i = 3, we have 4 entries in lut; write to the register */
6316 if ((i & 3) == 3)
6317 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
6318 }
6319 i40e_flush(hw);
6320
6321 return 0;
6322}
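
/* Editor's note: the HLUT loop above packs four 8-bit LUT entries per
 * 32-bit register.  With rss_size = 4, for example, the first write
 * (at i == 3) holds entries 0..3:
 *
 *	lut = (((0 << 8 | 1) << 8 | 2) << 8) | 3;	// 0x00010203
 *	wr32(hw, I40E_PFQF_HLUT(0), lut);
 *
 * so queue indices 0..rss_size-1 repeat round-robin across the table.
 */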
6323
6324/**
6325 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
6326 * @pf: board private structure
6327 * @queue_count: the requested queue count for rss.
6328 *
6329 * returns 0 if rss is not enabled, if enabled returns the final rss queue
6330 * count which may be different from the requested queue count.
6331 **/
6332int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6333{
6334 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
6335 return 0;
6336
6337 queue_count = min_t(int, queue_count, pf->rss_size_max);
6338 queue_count = rounddown_pow_of_two(queue_count);
6339
6340 if (queue_count != pf->rss_size) {
6341 i40e_prep_for_reset(pf);
6342
6343 pf->rss_size = queue_count;
6344
6345 i40e_reset_and_rebuild(pf, true);
6346 i40e_config_rss(pf);
6347 }
6348 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
6349 return pf->rss_size;
6350}
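
/* Editor's sketch of a hypothetical caller (e.g. an ethtool channel-count
 * handler; the variable names are assumptions, not driver code):
 *
 *	cur = i40e_reconfig_rss_queues(pf, requested_queues);
 *
 * The returned count may be rounded down to a power of two and capped at
 * rss_size_max, so a caller should report 'cur' back to the user.
 */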
6351
6352/**
6353 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
6354 * @pf: board private structure to initialize
6355 *
6356 * i40e_sw_init initializes the Adapter private data structure.
6357 * Fields are initialized based on PCI device information and
6358 * OS network device settings (MTU size).
6359 **/
6360static int i40e_sw_init(struct i40e_pf *pf)
6361{
6362 int err = 0;
6363 int size;
6364
6365 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
6366 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
2759997b 6367 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
6368 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
6369 if (I40E_DEBUG_USER & debug)
6370 pf->hw.debug_mask = debug;
6371 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
6372 I40E_DEFAULT_MSG_ENABLE);
6373 }
6374
6375 /* Set default capability flags */
6376 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
6377 I40E_FLAG_MSI_ENABLED |
6378 I40E_FLAG_MSIX_ENABLED |
6379 I40E_FLAG_RX_1BUF_ENABLED;
6380
6381 /* Depending on PF configurations, it is possible that the RSS
6382 * maximum might end up larger than the available queues
6383 */
41c445ff 6384 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
6385 pf->rss_size_max = min_t(int, pf->rss_size_max,
6386 pf->hw.func_caps.num_tx_qp);
6387 if (pf->hw.func_caps.rss) {
6388 pf->flags |= I40E_FLAG_RSS_ENABLED;
bf051a3b 6389 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
cbf61325 6390 pf->rss_size = rounddown_pow_of_two(pf->rss_size);
6391 } else {
6392 pf->rss_size = 1;
6393 }
6394
6395 /* MFP mode enabled */
6396 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
6397 pf->flags |= I40E_FLAG_MFP_ENABLED;
6398 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
6399 }
6400
6401 /* FW/NVM is not yet fixed in this regard */
6402 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
6403 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
6404 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6405 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
cbf61325 6406 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
60ea5f83 6407 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6408 } else {
6409 dev_info(&pf->pdev->dev,
0b67584f 6410 "Flow Director Sideband mode Disabled in MFP mode\n");
41c445ff 6411 }
6412 pf->fdir_pf_filter_count =
6413 pf->hw.func_caps.fd_filters_guaranteed;
6414 pf->hw.fdir_shared_filter_count =
6415 pf->hw.func_caps.fd_filters_best_effort;
6416 }
6417
6418 if (pf->hw.func_caps.vmdq) {
6419 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
6420 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
6421 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
6422 }
6423
6424#ifdef CONFIG_PCI_IOV
6425 if (pf->hw.func_caps.num_vfs) {
6426 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
6427 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
6428 pf->num_req_vfs = min_t(int,
6429 pf->hw.func_caps.num_vfs,
6430 I40E_MAX_VF_COUNT);
6431 }
6432#endif /* CONFIG_PCI_IOV */
6433 pf->eeprom_version = 0xDEAD;
6434 pf->lan_veb = I40E_NO_VEB;
6435 pf->lan_vsi = I40E_NO_VSI;
6436
6437 /* set up queue assignment tracking */
6438 size = sizeof(struct i40e_lump_tracking)
6439 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
6440 pf->qp_pile = kzalloc(size, GFP_KERNEL);
6441 if (!pf->qp_pile) {
6442 err = -ENOMEM;
6443 goto sw_init_done;
6444 }
6445 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
6446 pf->qp_pile->search_hint = 0;
6447
6448 /* set up vector assignment tracking */
6449 size = sizeof(struct i40e_lump_tracking)
6450 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
6451 pf->irq_pile = kzalloc(size, GFP_KERNEL);
6452 if (!pf->irq_pile) {
6453 kfree(pf->qp_pile);
6454 err = -ENOMEM;
6455 goto sw_init_done;
6456 }
6457 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
6458 pf->irq_pile->search_hint = 0;
6459
6460 mutex_init(&pf->switch_mutex);
6461
6462sw_init_done:
6463 return err;
6464}
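
/* Editor's note: the qp_pile/irq_pile allocations above size one tracking
 * header plus one u16 slot per trackable entry, i.e.
 *
 *	size = sizeof(struct i40e_lump_tracking) +
 *	       sizeof(u16) * pf->hw.func_caps.num_tx_qp;
 *
 * i40e_get_lump()/i40e_put_lump() then hand out and reclaim contiguous
 * ranges ("lumps") of queues or vectors from these piles.
 */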
6465
6466/**
6467 * i40e_set_ntuple - set the ntuple feature flag and take action
 6468 * @pf: board private structure
6469 * @features: the feature set that the stack is suggesting
6470 *
6471 * returns a bool to indicate if reset needs to happen
6472 **/
6473bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6474{
6475 bool need_reset = false;
6476
6477 /* Check if Flow Director n-tuple support was enabled or disabled. If
6478 * the state changed, we need to reset.
6479 */
6480 if (features & NETIF_F_NTUPLE) {
6481 /* Enable filters and mark for reset */
6482 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6483 need_reset = true;
6484 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6485 } else {
6486 /* turn off filters, mark for reset and clear SW filter list */
6487 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6488 need_reset = true;
6489 i40e_fdir_filter_exit(pf);
6490 }
6491 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6492 /* if ATR was disabled it can be re-enabled. */
6493 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6494 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6495 }
6496 return need_reset;
6497}
6498
41c445ff
JB
6499/**
6500 * i40e_set_features - set the netdev feature flags
6501 * @netdev: ptr to the netdev being adjusted
6502 * @features: the feature set that the stack is suggesting
6503 **/
6504static int i40e_set_features(struct net_device *netdev,
6505 netdev_features_t features)
6506{
6507 struct i40e_netdev_priv *np = netdev_priv(netdev);
6508 struct i40e_vsi *vsi = np->vsi;
6509 struct i40e_pf *pf = vsi->back;
6510 bool need_reset;
6511
6512 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6513 i40e_vlan_stripping_enable(vsi);
6514 else
6515 i40e_vlan_stripping_disable(vsi);
6516
6517 need_reset = i40e_set_ntuple(pf, features);
6518
6519 if (need_reset)
6520 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6521
6522 return 0;
6523}
6524
6525#ifdef CONFIG_I40E_VXLAN
6526/**
 6527 * i40e_get_vxlan_port_idx - Look up a possibly offloaded UDP port for Rx
6528 * @pf: board private structure
6529 * @port: The UDP port to look up
6530 *
6531 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
6532 **/
6533static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
6534{
6535 u8 i;
6536
6537 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6538 if (pf->vxlan_ports[i] == port)
6539 return i;
6540 }
6541
6542 return i;
6543}
6544
6545/**
6546 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
6547 * @netdev: This physical port's netdev
6548 * @sa_family: Socket Family that VXLAN is notifying us about
6549 * @port: New UDP port number that VXLAN started listening to
6550 **/
6551static void i40e_add_vxlan_port(struct net_device *netdev,
6552 sa_family_t sa_family, __be16 port)
6553{
6554 struct i40e_netdev_priv *np = netdev_priv(netdev);
6555 struct i40e_vsi *vsi = np->vsi;
6556 struct i40e_pf *pf = vsi->back;
6557 u8 next_idx;
6558 u8 idx;
6559
6560 if (sa_family == AF_INET6)
6561 return;
6562
6563 idx = i40e_get_vxlan_port_idx(pf, port);
6564
6565 /* Check if port already exists */
6566 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6567 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
6568 return;
6569 }
6570
6571 /* Now check if there is space to add the new port */
6572 next_idx = i40e_get_vxlan_port_idx(pf, 0);
6573
6574 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6575 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
6576 ntohs(port));
6577 return;
6578 }
6579
6580 /* New port: add it and mark its index in the bitmap */
6581 pf->vxlan_ports[next_idx] = port;
6582 pf->pending_vxlan_bitmap |= (1 << next_idx);
6583
6584 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6585}
6586
6587/**
6588 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
6589 * @netdev: This physical port's netdev
6590 * @sa_family: Socket Family that VXLAN is notifying us about
6591 * @port: UDP port number that VXLAN stopped listening to
6592 **/
6593static void i40e_del_vxlan_port(struct net_device *netdev,
6594 sa_family_t sa_family, __be16 port)
6595{
6596 struct i40e_netdev_priv *np = netdev_priv(netdev);
6597 struct i40e_vsi *vsi = np->vsi;
6598 struct i40e_pf *pf = vsi->back;
6599 u8 idx;
6600
6601 if (sa_family == AF_INET6)
6602 return;
6603
6604 idx = i40e_get_vxlan_port_idx(pf, port);
6605
6606 /* Check if port already exists */
6607 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6608 /* if port exists, set it to 0 (mark for deletion)
6609 * and make it pending
6610 */
6611 pf->vxlan_ports[idx] = 0;
6612
6613 pf->pending_vxlan_bitmap |= (1 << idx);
6614
6615 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6616 } else {
6617 netdev_warn(netdev, "Port %d was not found, not deleting\n",
6618 ntohs(port));
6619 }
6620}
6621
6622#endif
6623#ifdef HAVE_FDB_OPS
6624#ifdef USE_CONST_DEV_UC_CHAR
6625static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6626 struct net_device *dev,
6627 const unsigned char *addr,
6628 u16 flags)
6629#else
6630static int i40e_ndo_fdb_add(struct ndmsg *ndm,
6631 struct net_device *dev,
6632 unsigned char *addr,
6633 u16 flags)
6634#endif
6635{
6636 struct i40e_netdev_priv *np = netdev_priv(dev);
6637 struct i40e_pf *pf = np->vsi->back;
6638 int err = 0;
6639
6640 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
6641 return -EOPNOTSUPP;
6642
6643 /* Hardware does not support aging addresses so if a
6644 * ndm_state is given only allow permanent addresses
6645 */
6646 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6647 netdev_info(dev, "FDB only supports static addresses\n");
6648 return -EINVAL;
6649 }
6650
6651 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
6652 err = dev_uc_add_excl(dev, addr);
6653 else if (is_multicast_ether_addr(addr))
6654 err = dev_mc_add_excl(dev, addr);
6655 else
6656 err = -EINVAL;
6657
6658 /* Only return duplicate errors if NLM_F_EXCL is set */
6659 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6660 err = 0;
6661
6662 return err;
6663}
6664
6665#ifndef USE_DEFAULT_FDB_DEL_DUMP
6666#ifdef USE_CONST_DEV_UC_CHAR
6667static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6668 struct net_device *dev,
6669 const unsigned char *addr)
6670#else
6671static int i40e_ndo_fdb_del(struct ndmsg *ndm,
6672 struct net_device *dev,
6673 unsigned char *addr)
6674#endif
6675{
6676 struct i40e_netdev_priv *np = netdev_priv(dev);
6677 struct i40e_pf *pf = np->vsi->back;
6678 int err = -EOPNOTSUPP;
6679
6680 if (ndm->ndm_state & NUD_PERMANENT) {
6681 netdev_info(dev, "FDB only supports static addresses\n");
6682 return -EINVAL;
6683 }
6684
6685 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
6686 if (is_unicast_ether_addr(addr))
6687 err = dev_uc_del(dev, addr);
6688 else if (is_multicast_ether_addr(addr))
6689 err = dev_mc_del(dev, addr);
6690 else
6691 err = -EINVAL;
6692 }
6693
6694 return err;
6695}
6696
6697static int i40e_ndo_fdb_dump(struct sk_buff *skb,
6698 struct netlink_callback *cb,
6699 struct net_device *dev,
6700 int idx)
6701{
6702 struct i40e_netdev_priv *np = netdev_priv(dev);
6703 struct i40e_pf *pf = np->vsi->back;
6704
6705 if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
6706 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6707
6708 return idx;
6709}
6710
6711#endif /* USE_DEFAULT_FDB_DEL_DUMP */
6712#endif /* HAVE_FDB_OPS */
6713static const struct net_device_ops i40e_netdev_ops = {
6714 .ndo_open = i40e_open,
6715 .ndo_stop = i40e_close,
6716 .ndo_start_xmit = i40e_lan_xmit_frame,
6717 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
6718 .ndo_set_rx_mode = i40e_set_rx_mode,
6719 .ndo_validate_addr = eth_validate_addr,
6720 .ndo_set_mac_address = i40e_set_mac,
6721 .ndo_change_mtu = i40e_change_mtu,
beb0dff1 6722 .ndo_do_ioctl = i40e_ioctl,
6723 .ndo_tx_timeout = i40e_tx_timeout,
6724 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
6725 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
6726#ifdef CONFIG_NET_POLL_CONTROLLER
6727 .ndo_poll_controller = i40e_netpoll,
6728#endif
6729 .ndo_setup_tc = i40e_setup_tc,
6730 .ndo_set_features = i40e_set_features,
6731 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
6732 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6733 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
6734 .ndo_get_vf_config = i40e_ndo_get_vf_config,
588aefa0 6735 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
6736#ifdef CONFIG_I40E_VXLAN
6737 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6738 .ndo_del_vxlan_port = i40e_del_vxlan_port,
6739#endif
6740#ifdef HAVE_FDB_OPS
6741 .ndo_fdb_add = i40e_ndo_fdb_add,
6742#ifndef USE_DEFAULT_FDB_DEL_DUMP
6743 .ndo_fdb_del = i40e_ndo_fdb_del,
6744 .ndo_fdb_dump = i40e_ndo_fdb_dump,
6745#endif
6746#endif
6747};
6748
6749/**
6750 * i40e_config_netdev - Setup the netdev flags
6751 * @vsi: the VSI being configured
6752 *
6753 * Returns 0 on success, negative value on failure
6754 **/
6755static int i40e_config_netdev(struct i40e_vsi *vsi)
6756{
1a10370a 6757 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
6758 struct i40e_pf *pf = vsi->back;
6759 struct i40e_hw *hw = &pf->hw;
6760 struct i40e_netdev_priv *np;
6761 struct net_device *netdev;
6762 u8 mac_addr[ETH_ALEN];
6763 int etherdev_size;
6764
6765 etherdev_size = sizeof(struct i40e_netdev_priv);
f8ff1464 6766 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
6767 if (!netdev)
6768 return -ENOMEM;
6769
6770 vsi->netdev = netdev;
6771 np = netdev_priv(netdev);
6772 np->vsi = vsi;
6773
d70e941b 6774 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
41c445ff 6775 NETIF_F_GSO_UDP_TUNNEL |
d70e941b 6776 NETIF_F_TSO;
6777
6778 netdev->features = NETIF_F_SG |
6779 NETIF_F_IP_CSUM |
6780 NETIF_F_SCTP_CSUM |
6781 NETIF_F_HIGHDMA |
6782 NETIF_F_GSO_UDP_TUNNEL |
6783 NETIF_F_HW_VLAN_CTAG_TX |
6784 NETIF_F_HW_VLAN_CTAG_RX |
6785 NETIF_F_HW_VLAN_CTAG_FILTER |
6786 NETIF_F_IPV6_CSUM |
6787 NETIF_F_TSO |
6788 NETIF_F_TSO6 |
6789 NETIF_F_RXCSUM |
7c3c288b 6790 NETIF_F_NTUPLE |
6791 NETIF_F_RXHASH |
6792 0;
6793
6794 /* copy netdev features into list of user selectable features */
6795 netdev->hw_features |= netdev->features;
6796
6797 if (vsi->type == I40E_VSI_MAIN) {
6798 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6799 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
6800 } else {
6801 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6802 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
6803 pf->vsi[pf->lan_vsi]->netdev->name);
6804 random_ether_addr(mac_addr);
6805 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
6806 }
1a10370a 6807 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
6808
6809 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6810 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
6811 /* vlan gets same features (except vlan offload)
6812 * after any tweaks for specific VSI types
6813 */
6814 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
6815 NETIF_F_HW_VLAN_CTAG_RX |
6816 NETIF_F_HW_VLAN_CTAG_FILTER);
6817 netdev->priv_flags |= IFF_UNICAST_FLT;
6818 netdev->priv_flags |= IFF_SUPP_NOFCS;
6819 /* Setup netdev TC information */
6820 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
6821
6822 netdev->netdev_ops = &i40e_netdev_ops;
6823 netdev->watchdog_timeo = 5 * HZ;
6824 i40e_set_ethtool_ops(netdev);
6825
6826 return 0;
6827}
6828
6829/**
6830 * i40e_vsi_delete - Delete a VSI from the switch
6831 * @vsi: the VSI being removed
6832 *
6833 * Returns 0 on success, negative value on failure
6834 **/
6835static void i40e_vsi_delete(struct i40e_vsi *vsi)
6836{
 6837 /* removing the default VSI is not allowed */
6838 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
6839 return;
6840
6841 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6842 return;
6843}
6844
6845/**
6846 * i40e_add_vsi - Add a VSI to the switch
6847 * @vsi: the VSI being configured
6848 *
6849 * This initializes a VSI context depending on the VSI type to be added and
6850 * passes it down to the add_vsi aq command.
6851 **/
6852static int i40e_add_vsi(struct i40e_vsi *vsi)
6853{
6854 int ret = -ENODEV;
6855 struct i40e_mac_filter *f, *ftmp;
6856 struct i40e_pf *pf = vsi->back;
6857 struct i40e_hw *hw = &pf->hw;
6858 struct i40e_vsi_context ctxt;
6859 u8 enabled_tc = 0x1; /* TC0 enabled */
6860 int f_count = 0;
6861
6862 memset(&ctxt, 0, sizeof(ctxt));
6863 switch (vsi->type) {
6864 case I40E_VSI_MAIN:
6865 /* The PF's main VSI is already setup as part of the
6866 * device initialization, so we'll not bother with
6867 * the add_vsi call, but we will retrieve the current
6868 * VSI context.
6869 */
6870 ctxt.seid = pf->main_vsi_seid;
6871 ctxt.pf_num = pf->hw.pf_id;
6872 ctxt.vf_num = 0;
6873 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6874 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6875 if (ret) {
6876 dev_info(&pf->pdev->dev,
6877 "couldn't get pf vsi config, err %d, aq_err %d\n",
6878 ret, pf->hw.aq.asq_last_status);
6879 return -ENOENT;
6880 }
6881 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6882 vsi->info.valid_sections = 0;
6883
6884 vsi->seid = ctxt.seid;
6885 vsi->id = ctxt.vsi_number;
6886
6887 enabled_tc = i40e_pf_get_tc_map(pf);
6888
6889 /* MFP mode setup queue map and update VSI */
6890 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6891 memset(&ctxt, 0, sizeof(ctxt));
6892 ctxt.seid = pf->main_vsi_seid;
6893 ctxt.pf_num = pf->hw.pf_id;
6894 ctxt.vf_num = 0;
6895 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
6896 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6897 if (ret) {
6898 dev_info(&pf->pdev->dev,
6899 "update vsi failed, aq_err=%d\n",
6900 pf->hw.aq.asq_last_status);
6901 ret = -ENOENT;
6902 goto err;
6903 }
6904 /* update the local VSI info queue map */
6905 i40e_vsi_update_queue_map(vsi, &ctxt);
6906 vsi->info.valid_sections = 0;
6907 } else {
6908 /* Default/Main VSI is only enabled for TC0
6909 * reconfigure it to enable all TCs that are
6910 * available on the port in SFP mode.
6911 */
6912 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6913 if (ret) {
6914 dev_info(&pf->pdev->dev,
6915 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
6916 enabled_tc, ret,
6917 pf->hw.aq.asq_last_status);
6918 ret = -ENOENT;
6919 }
6920 }
6921 break;
6922
6923 case I40E_VSI_FDIR:
6924 ctxt.pf_num = hw->pf_id;
6925 ctxt.vf_num = 0;
6926 ctxt.uplink_seid = vsi->uplink_seid;
6927 ctxt.connection_type = 0x1; /* regular data port */
6928 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
41c445ff 6929 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6930 break;
6931
6932 case I40E_VSI_VMDQ2:
6933 ctxt.pf_num = hw->pf_id;
6934 ctxt.vf_num = 0;
6935 ctxt.uplink_seid = vsi->uplink_seid;
6936 ctxt.connection_type = 0x1; /* regular data port */
6937 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6938
6939 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6940
6941 /* This VSI is connected to VEB so the switch_id
6942 * should be set to zero by default.
6943 */
6944 ctxt.info.switch_id = 0;
6945 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6946 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6947
6948 /* Setup the VSI tx/rx queue map for TC0 only for now */
6949 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6950 break;
6951
6952 case I40E_VSI_SRIOV:
6953 ctxt.pf_num = hw->pf_id;
6954 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
6955 ctxt.uplink_seid = vsi->uplink_seid;
6956 ctxt.connection_type = 0x1; /* regular data port */
6957 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6958
6959 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6960
6961 /* This VSI is connected to VEB so the switch_id
6962 * should be set to zero by default.
6963 */
6964 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6965
6966 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6967 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6968 /* Setup the VSI tx/rx queue map for TC0 only for now */
6969 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6970 break;
6971
6972 default:
6973 return -ENODEV;
6974 }
6975
6976 if (vsi->type != I40E_VSI_MAIN) {
6977 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6978 if (ret) {
6979 dev_info(&vsi->back->pdev->dev,
6980 "add vsi failed, aq_err=%d\n",
6981 vsi->back->hw.aq.asq_last_status);
6982 ret = -ENOENT;
6983 goto err;
6984 }
6985 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6986 vsi->info.valid_sections = 0;
6987 vsi->seid = ctxt.seid;
6988 vsi->id = ctxt.vsi_number;
6989 }
6990
6991 /* If macvlan filters already exist, force them to get loaded */
6992 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
6993 f->changed = true;
6994 f_count++;
6995 }
6996 if (f_count) {
6997 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
6998 pf->flags |= I40E_FLAG_FILTER_SYNC;
6999 }
7000
7001 /* Update VSI BW information */
7002 ret = i40e_vsi_get_bw_info(vsi);
7003 if (ret) {
7004 dev_info(&pf->pdev->dev,
7005 "couldn't get vsi bw info, err %d, aq_err %d\n",
7006 ret, pf->hw.aq.asq_last_status);
7007 /* VSI is already added so not tearing that up */
7008 ret = 0;
7009 }
7010
7011err:
7012 return ret;
7013}
7014
7015/**
7016 * i40e_vsi_release - Delete a VSI and free its resources
7017 * @vsi: the VSI being removed
7018 *
7019 * Returns 0 on success or < 0 on error
7020 **/
7021int i40e_vsi_release(struct i40e_vsi *vsi)
7022{
7023 struct i40e_mac_filter *f, *ftmp;
7024 struct i40e_veb *veb = NULL;
7025 struct i40e_pf *pf;
7026 u16 uplink_seid;
7027 int i, n;
7028
7029 pf = vsi->back;
7030
7031 /* release of a VEB-owner or last VSI is not allowed */
7032 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
7033 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
7034 vsi->seid, vsi->uplink_seid);
7035 return -ENODEV;
7036 }
7037 if (vsi == pf->vsi[pf->lan_vsi] &&
7038 !test_bit(__I40E_DOWN, &pf->state)) {
7039 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
7040 return -ENODEV;
7041 }
7042
7043 uplink_seid = vsi->uplink_seid;
7044 if (vsi->type != I40E_VSI_SRIOV) {
7045 if (vsi->netdev_registered) {
7046 vsi->netdev_registered = false;
7047 if (vsi->netdev) {
7048 /* results in a call to i40e_close() */
7049 unregister_netdev(vsi->netdev);
7050 }
7051 } else {
90ef8d47 7052 i40e_vsi_close(vsi);
7053 }
7054 i40e_vsi_disable_irq(vsi);
7055 }
7056
7057 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
7058 i40e_del_filter(vsi, f->macaddr, f->vlan,
7059 f->is_vf, f->is_netdev);
7060 i40e_sync_vsi_filters(vsi);
7061
7062 i40e_vsi_delete(vsi);
7063 i40e_vsi_free_q_vectors(vsi);
7064 if (vsi->netdev) {
7065 free_netdev(vsi->netdev);
7066 vsi->netdev = NULL;
7067 }
7068 i40e_vsi_clear_rings(vsi);
7069 i40e_vsi_clear(vsi);
7070
7071 /* If this was the last thing on the VEB, except for the
7072 * controlling VSI, remove the VEB, which puts the controlling
7073 * VSI onto the next level down in the switch.
7074 *
7075 * Well, okay, there's one more exception here: don't remove
7076 * the orphan VEBs yet. We'll wait for an explicit remove request
7077 * from up the network stack.
7078 */
7079 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7080 if (pf->vsi[i] &&
7081 pf->vsi[i]->uplink_seid == uplink_seid &&
7082 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7083 n++; /* count the VSIs */
7084 }
7085 }
7086 for (i = 0; i < I40E_MAX_VEB; i++) {
7087 if (!pf->veb[i])
7088 continue;
7089 if (pf->veb[i]->uplink_seid == uplink_seid)
7090 n++; /* count the VEBs */
7091 if (pf->veb[i]->seid == uplink_seid)
7092 veb = pf->veb[i];
7093 }
7094 if (n == 0 && veb && veb->uplink_seid != 0)
7095 i40e_veb_release(veb);
7096
7097 return 0;
7098}
7099
7100/**
7101 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
7102 * @vsi: ptr to the VSI
7103 *
7104 * This should only be called after i40e_vsi_mem_alloc() which allocates the
7105 * corresponding SW VSI structure and initializes num_queue_pairs for the
7106 * newly allocated VSI.
7107 *
7108 * Returns 0 on success or negative on failure
7109 **/
7110static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
7111{
7112 int ret = -ENOENT;
7113 struct i40e_pf *pf = vsi->back;
7114
493fb300 7115 if (vsi->q_vectors[0]) {
7116 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
7117 vsi->seid);
7118 return -EEXIST;
7119 }
7120
7121 if (vsi->base_vector) {
f29eaa3d 7122 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
7123 vsi->seid, vsi->base_vector);
7124 return -EEXIST;
7125 }
7126
90e04070 7127 ret = i40e_vsi_alloc_q_vectors(vsi);
7128 if (ret) {
7129 dev_info(&pf->pdev->dev,
7130 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
7131 vsi->num_q_vectors, vsi->seid, ret);
7132 vsi->num_q_vectors = 0;
7133 goto vector_setup_out;
7134 }
7135
7136 if (vsi->num_q_vectors)
7137 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
7138 vsi->num_q_vectors, vsi->idx);
7139 if (vsi->base_vector < 0) {
7140 dev_info(&pf->pdev->dev,
f29eaa3d 7141 "failed to get queue tracking for VSI %d, err=%d\n",
7142 vsi->seid, vsi->base_vector);
7143 i40e_vsi_free_q_vectors(vsi);
7144 ret = -ENOENT;
7145 goto vector_setup_out;
7146 }
7147
7148vector_setup_out:
7149 return ret;
7150}
7151
7152/**
 7153 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
 7154 * @vsi: pointer to the VSI.
7155 *
7156 * This re-allocates a vsi's queue resources.
7157 *
7158 * Returns pointer to the successfully allocated and configured VSI sw struct
7159 * on success, otherwise returns NULL on failure.
7160 **/
7161static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
7162{
7163 struct i40e_pf *pf = vsi->back;
7164 u8 enabled_tc;
7165 int ret;
7166
7167 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
7168 i40e_vsi_clear_rings(vsi);
7169
7170 i40e_vsi_free_arrays(vsi, false);
7171 i40e_set_num_rings_in_vsi(vsi);
7172 ret = i40e_vsi_alloc_arrays(vsi, false);
7173 if (ret)
7174 goto err_vsi;
7175
7176 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
7177 if (ret < 0) {
7178 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7179 vsi->seid, ret);
7180 goto err_vsi;
7181 }
7182 vsi->base_queue = ret;
7183
7184 /* Update the FW view of the VSI. Force a reset of TC and queue
7185 * layout configurations.
7186 */
7187 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7188 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7189 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7190 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7191
7192 /* assign it some queues */
7193 ret = i40e_alloc_rings(vsi);
7194 if (ret)
7195 goto err_rings;
7196
7197 /* map all of the rings to the q_vectors */
7198 i40e_vsi_map_rings_to_vectors(vsi);
7199 return vsi;
7200
7201err_rings:
7202 i40e_vsi_free_q_vectors(vsi);
7203 if (vsi->netdev_registered) {
7204 vsi->netdev_registered = false;
7205 unregister_netdev(vsi->netdev);
7206 free_netdev(vsi->netdev);
7207 vsi->netdev = NULL;
7208 }
7209 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
7210err_vsi:
7211 i40e_vsi_clear(vsi);
7212 return NULL;
7213}
7214
7215/**
7216 * i40e_vsi_setup - Set up a VSI by a given type
7217 * @pf: board private structure
7218 * @type: VSI type
7219 * @uplink_seid: the switch element to link to
7220 * @param1: usage depends upon VSI type. For VF types, indicates VF id
7221 *
 7222 * This allocates the sw VSI structure and its queue resources, then adds the VSI
7223 * to the identified VEB.
7224 *
 7225 * Returns pointer to the successfully allocated and configured VSI sw struct on
7226 * success, otherwise returns NULL on failure.
7227 **/
7228struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
7229 u16 uplink_seid, u32 param1)
7230{
7231 struct i40e_vsi *vsi = NULL;
7232 struct i40e_veb *veb = NULL;
7233 int ret, i;
7234 int v_idx;
7235
7236 /* The requested uplink_seid must be either
7237 * - the PF's port seid
7238 * no VEB is needed because this is the PF
7239 * or this is a Flow Director special case VSI
7240 * - seid of an existing VEB
7241 * - seid of a VSI that owns an existing VEB
7242 * - seid of a VSI that doesn't own a VEB
7243 * a new VEB is created and the VSI becomes the owner
7244 * - seid of the PF VSI, which is what creates the first VEB
7245 * this is a special case of the previous
7246 *
7247 * Find which uplink_seid we were given and create a new VEB if needed
7248 */
7249 for (i = 0; i < I40E_MAX_VEB; i++) {
7250 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
7251 veb = pf->veb[i];
7252 break;
7253 }
7254 }
7255
7256 if (!veb && uplink_seid != pf->mac_seid) {
7257
7258 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7259 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
7260 vsi = pf->vsi[i];
7261 break;
7262 }
7263 }
7264 if (!vsi) {
7265 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
7266 uplink_seid);
7267 return NULL;
7268 }
7269
7270 if (vsi->uplink_seid == pf->mac_seid)
7271 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
7272 vsi->tc_config.enabled_tc);
7273 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
7274 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
7275 vsi->tc_config.enabled_tc);
7276
7277 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
7278 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
7279 veb = pf->veb[i];
7280 }
7281 if (!veb) {
7282 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
7283 return NULL;
7284 }
7285
7286 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7287 uplink_seid = veb->seid;
7288 }
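/* (Editorial) Past this point uplink_seid always names a VEB or the MAC:
 * when we were handed a plain VSI seid, the block above created (or found)
 * the VEB, marked that VSI as its owner and rewrote uplink_seid to
 * veb->seid.
 */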
7289
7290 /* get vsi sw struct */
7291 v_idx = i40e_vsi_mem_alloc(pf, type);
7292 if (v_idx < 0)
7293 goto err_alloc;
7294 vsi = pf->vsi[v_idx];
7295 if (!vsi)
7296 goto err_alloc;
7297 vsi->type = type;
7298 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
7299
7300 if (type == I40E_VSI_MAIN)
7301 pf->lan_vsi = v_idx;
7302 else if (type == I40E_VSI_SRIOV)
7303 vsi->vf_id = param1;
7304 /* assign it some queues */
7305 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
7306 vsi->idx);
7307 if (ret < 0) {
7308 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7309 vsi->seid, ret);
7310 goto err_vsi;
7311 }
7312 vsi->base_queue = ret;
7313
7314 /* get a VSI from the hardware */
7315 vsi->uplink_seid = uplink_seid;
7316 ret = i40e_add_vsi(vsi);
7317 if (ret)
7318 goto err_vsi;
7319
7320 switch (vsi->type) {
7321 /* setup the netdev if needed */
7322 case I40E_VSI_MAIN:
7323 case I40E_VSI_VMDQ2:
7324 ret = i40e_config_netdev(vsi);
7325 if (ret)
7326 goto err_netdev;
7327 ret = register_netdev(vsi->netdev);
7328 if (ret)
7329 goto err_netdev;
7330 vsi->netdev_registered = true;
7331 netif_carrier_off(vsi->netdev);
7332#ifdef CONFIG_I40E_DCB
7333 /* Setup DCB netlink interface */
7334 i40e_dcbnl_setup(vsi);
7335#endif /* CONFIG_I40E_DCB */
7336 /* fall through */
7337
7338 case I40E_VSI_FDIR:
7339 /* set up vectors and rings if needed */
7340 ret = i40e_vsi_setup_vectors(vsi);
7341 if (ret)
7342 goto err_msix;
7343
7344 ret = i40e_alloc_rings(vsi);
7345 if (ret)
7346 goto err_rings;
7347
7348 /* map all of the rings to the q_vectors */
7349 i40e_vsi_map_rings_to_vectors(vsi);
7350
7351 i40e_vsi_reset_stats(vsi);
7352 break;
7353
7354 default:
7355 /* no netdev or rings for the other VSI types */
7356 break;
7357 }
7358
7359 return vsi;
7360
7361err_rings:
7362 i40e_vsi_free_q_vectors(vsi);
7363err_msix:
7364 if (vsi->netdev_registered) {
7365 vsi->netdev_registered = false;
7366 unregister_netdev(vsi->netdev);
7367 free_netdev(vsi->netdev);
7368 vsi->netdev = NULL;
7369 }
7370err_netdev:
7371 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
7372err_vsi:
7373 i40e_vsi_clear(vsi);
7374err_alloc:
7375 return NULL;
7376}
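/* (Usage sketch, illustrative only; `veb' and `vf' are hypothetical
 * locals.) Creating a VF VSI under an existing VEB, with param1 carrying
 * the VF id as documented above:
 *
 *	struct i40e_vsi *vsi;
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, veb->seid, vf->vf_id);
 *	if (!vsi)
 *		return -ENOMEM;
 *
 * The Flow Director VSI is the special case that uses the PF's port seid
 * (pf->mac_seid) as its uplink, per the comment at the top of the function.
 */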
7377
7378/**
7379 * i40e_veb_get_bw_info - Query VEB BW information
7380 * @veb: the veb to query
7381 *
7382 * Query the Tx scheduler BW configuration data for given VEB
7383 **/
7384static int i40e_veb_get_bw_info(struct i40e_veb *veb)
7385{
7386 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
7387 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
7388 struct i40e_pf *pf = veb->pf;
7389 struct i40e_hw *hw = &pf->hw;
7390 u32 tc_bw_max;
7391 int ret = 0;
7392 int i;
7393
7394 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
7395 &bw_data, NULL);
7396 if (ret) {
7397 dev_info(&pf->pdev->dev,
7398 "query veb bw config failed, aq_err=%d\n",
7399 hw->aq.asq_last_status);
7400 goto out;
7401 }
7402
7403 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
7404 &ets_data, NULL);
7405 if (ret) {
7406 dev_info(&pf->pdev->dev,
7407 "query veb bw ets config failed, aq_err=%d\n",
7408 hw->aq.asq_last_status);
7409 goto out;
7410 }
7411
7412 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
7413 veb->bw_max_quanta = ets_data.tc_bw_max;
7414 veb->is_abs_credits = bw_data.absolute_credits_enable;
7415 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
7416 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
7417 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7418 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
7419 veb->bw_tc_limit_credits[i] =
7420 le16_to_cpu(bw_data.tc_bw_limits[i]);
7421 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
7422 }
7423
7424out:
7425 return ret;
7426}
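/* (Editorial worked example; the value is hypothetical.) tc_bw_max packs
 * one 3-bit max-quanta value per TC into 4-bit slots of the combined
 * 32-bit word built above. With tc_bw_max == 0x00332211:
 *
 *	(0x00332211 >>  0) & 0x7 == 1	(TC0)
 *	(0x00332211 >>  4) & 0x7 == 1	(TC1)
 *	(0x00332211 >>  8) & 0x7 == 2	(TC2)
 *	(0x00332211 >> 12) & 0x7 == 2	(TC3, and so on per TC)
 */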
7427
7428/**
7429 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
7430 * @pf: board private structure
7431 *
7432 * On error: returns error code (negative)
7433 * On success: returns vsi index in PF (positive)
7434 **/
7435static int i40e_veb_mem_alloc(struct i40e_pf *pf)
7436{
7437 int ret = -ENOENT;
7438 struct i40e_veb *veb;
7439 int i;
7440
7441 /* Need to protect the allocation of switch elements at the PF level */
7442 mutex_lock(&pf->switch_mutex);
7443
7444 /* VEB list may be fragmented if VEB creation/destruction has
7445 * been happening. We can afford to do a quick scan to look
7446 * for any free slots in the list.
7447 *
7448 * find the next empty VEB slot
7449 */
7450 i = 0;
7451 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
7452 i++;
7453 if (i >= I40E_MAX_VEB) {
7454 ret = -ENOMEM;
7455 goto err_alloc_veb; /* out of VEB slots! */
7456 }
7457
7458 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
7459 if (!veb) {
7460 ret = -ENOMEM;
7461 goto err_alloc_veb;
7462 }
7463 veb->pf = pf;
7464 veb->idx = i;
7465 veb->enabled_tc = 1;
7466
7467 pf->veb[i] = veb;
7468 ret = i;
7469err_alloc_veb:
7470 mutex_unlock(&pf->switch_mutex);
7471 return ret;
7472}
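/* (Editorial note.) The returned index is a slot in pf->veb[]; the
 * matching release path is i40e_veb_clear(), which re-takes
 * pf->switch_mutex and NULLs the slot. i40e_veb_setup() below uses
 * exactly this pairing:
 *
 *	veb_idx = i40e_veb_mem_alloc(pf);
 *	if (veb_idx < 0)
 *		goto err_alloc;
 *	...
 *	i40e_veb_clear(pf->veb[veb_idx]);
 */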
7473
7474/**
7475 * i40e_switch_branch_release - Delete a branch of the switch tree
7476 * @branch: where to start deleting
7477 *
7478 * This uses recursion to find the tips of the branch to be
7479 * removed, deleting until we get back to and can delete this VEB.
7480 **/
7481static void i40e_switch_branch_release(struct i40e_veb *branch)
7482{
7483 struct i40e_pf *pf = branch->pf;
7484 u16 branch_seid = branch->seid;
7485 u16 veb_idx = branch->idx;
7486 int i;
7487
7488 /* release any VEBs on this VEB - RECURSION */
7489 for (i = 0; i < I40E_MAX_VEB; i++) {
7490 if (!pf->veb[i])
7491 continue;
7492 if (pf->veb[i]->uplink_seid == branch->seid)
7493 i40e_switch_branch_release(pf->veb[i]);
7494 }
7495
7496 /* Release the VSIs on this VEB, but not the owner VSI.
7497 *
7498 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7499 * the VEB itself, so don't use (*branch) after this loop.
7500 */
7501 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7502 if (!pf->vsi[i])
7503 continue;
7504 if (pf->vsi[i]->uplink_seid == branch_seid &&
7505 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7506 i40e_vsi_release(pf->vsi[i]);
7507 }
7508 }
7509
7510 /* There's one corner case where the VEB might not have been
7511 * removed, so double check it here and remove it if needed.
7512 * This case happens if the veb was created from the debugfs
7513 * commands and no VSIs were added to it.
7514 */
7515 if (pf->veb[veb_idx])
7516 i40e_veb_release(pf->veb[veb_idx]);
7517}
7518
7519/**
7520 * i40e_veb_clear - remove veb struct
7521 * @veb: the veb to remove
7522 **/
7523static void i40e_veb_clear(struct i40e_veb *veb)
7524{
7525 if (!veb)
7526 return;
7527
7528 if (veb->pf) {
7529 struct i40e_pf *pf = veb->pf;
7530
7531 mutex_lock(&pf->switch_mutex);
7532 if (pf->veb[veb->idx] == veb)
7533 pf->veb[veb->idx] = NULL;
7534 mutex_unlock(&pf->switch_mutex);
7535 }
7536
7537 kfree(veb);
7538}
7539
7540/**
7541 * i40e_veb_release - Delete a VEB and free its resources
7542 * @veb: the VEB being removed
7543 **/
7544void i40e_veb_release(struct i40e_veb *veb)
7545{
7546 struct i40e_vsi *vsi = NULL;
7547 struct i40e_pf *pf;
7548 int i, n = 0;
7549
7550 pf = veb->pf;
7551
7552 /* find the remaining VSI and check for extras */
7553 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7554 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7555 n++;
7556 vsi = pf->vsi[i];
7557 }
7558 }
7559 if (n != 1) {
7560 dev_info(&pf->pdev->dev,
7561 "can't remove VEB %d with %d VSIs left\n",
7562 veb->seid, n);
7563 return;
7564 }
7565
7566 /* move the remaining VSI to uplink veb */
7567 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
7568 if (veb->uplink_seid) {
7569 vsi->uplink_seid = veb->uplink_seid;
7570 if (veb->uplink_seid == pf->mac_seid)
7571 vsi->veb_idx = I40E_NO_VEB;
7572 else
7573 vsi->veb_idx = veb->veb_idx;
7574 } else {
7575 /* floating VEB */
7576 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
7577 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
7578 }
7579
7580 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
7581 i40e_veb_clear(veb);
7582
7583 return;
7584}
7585
7586/**
7587 * i40e_add_veb - create the VEB in the switch
7588 * @veb: the VEB to be instantiated
7589 * @vsi: the controlling VSI
7590 **/
7591static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
7592{
7593 bool is_default = false;
7594 bool is_cloud = false;
7595 int ret;
7596
7597 /* get a VEB from the hardware */
7598 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
7599 veb->enabled_tc, is_default,
7600 is_cloud, &veb->seid, NULL);
7601 if (ret) {
7602 dev_info(&veb->pf->pdev->dev,
7603 "couldn't add VEB, err %d, aq_err %d\n",
7604 ret, veb->pf->hw.aq.asq_last_status);
7605 return -EPERM;
7606 }
7607
7608 /* get statistics counter */
7609 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
7610 &veb->stats_idx, NULL, NULL, NULL);
7611 if (ret) {
7612 dev_info(&veb->pf->pdev->dev,
7613 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
7614 ret, veb->pf->hw.aq.asq_last_status);
7615 return -EPERM;
7616 }
7617 ret = i40e_veb_get_bw_info(veb);
7618 if (ret) {
7619 dev_info(&veb->pf->pdev->dev,
7620 "couldn't get VEB bw info, err %d, aq_err %d\n",
7621 ret, veb->pf->hw.aq.asq_last_status);
7622 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
7623 return -ENOENT;
7624 }
7625
7626 vsi->uplink_seid = veb->seid;
7627 vsi->veb_idx = veb->idx;
7628 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7629
7630 return 0;
7631}
7632
7633/**
7634 * i40e_veb_setup - Set up a VEB
7635 * @pf: board private structure
7636 * @flags: VEB setup flags
7637 * @uplink_seid: the switch element to link to
7638 * @vsi_seid: the initial VSI seid
7639 * @enabled_tc: Enabled TC bit-map
7640 *
7641 * This allocates the sw VEB structure and links it into the switch
7642 * It is possible and legal for this to be a duplicate of an already
7643 * existing VEB. It is also possible for both uplink and vsi seids
7644 * to be zero, in order to create a floating VEB.
7645 *
7646 * Returns pointer to the successfully allocated VEB sw struct on
7647 * success, otherwise returns NULL on failure.
7648 **/
7649struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7650 u16 uplink_seid, u16 vsi_seid,
7651 u8 enabled_tc)
7652{
7653 struct i40e_veb *veb, *uplink_veb = NULL;
7654 int vsi_idx, veb_idx;
7655 int ret;
7656
7657 /* if one seid is 0, the other must be 0 to create a floating relay */
7658 if ((uplink_seid == 0 || vsi_seid == 0) &&
7659 (uplink_seid + vsi_seid != 0)) {
7660 dev_info(&pf->pdev->dev,
7661 "one, not both seid's are 0: uplink=%d vsi=%d\n",
7662 uplink_seid, vsi_seid);
7663 return NULL;
7664 }
7665
7666 /* make sure there is such a vsi and uplink */
7667 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
7668 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7669 break;
7670 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
7671 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7672 vsi_seid);
7673 return NULL;
7674 }
7675
7676 if (uplink_seid && uplink_seid != pf->mac_seid) {
7677 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
7678 if (pf->veb[veb_idx] &&
7679 pf->veb[veb_idx]->seid == uplink_seid) {
7680 uplink_veb = pf->veb[veb_idx];
7681 break;
7682 }
7683 }
7684 if (!uplink_veb) {
7685 dev_info(&pf->pdev->dev,
7686 "uplink seid %d not found\n", uplink_seid);
7687 return NULL;
7688 }
7689 }
7690
7691 /* get veb sw struct */
7692 veb_idx = i40e_veb_mem_alloc(pf);
7693 if (veb_idx < 0)
7694 goto err_alloc;
7695 veb = pf->veb[veb_idx];
7696 veb->flags = flags;
7697 veb->uplink_seid = uplink_seid;
7698 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
7699 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
7700
7701 /* create the VEB in the switch */
7702 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7703 if (ret)
7704 goto err_veb;
7705
7706 return veb;
7707
7708err_veb:
7709 i40e_veb_clear(veb);
7710err_alloc:
7711 return NULL;
7712}
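/* (Usage sketch, illustrative.) Per the kernel-doc above, both seids may
 * be zero to create a floating VEB; the common case hangs a relay off the
 * MAC uplink, as i40e_vsi_setup() does:
 *
 *	struct i40e_veb *veb;
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *	if (!veb)
 *		return -ENOENT;
 */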
7713
7714/**
7715 * i40e_setup_pf_switch_element - set pf vars based on switch type
7716 * @pf: board private structure
7717 * @ele: element we are building info from
7718 * @num_reported: total number of elements
7719 * @printconfig: should we print the contents
7720 *
7721 * helper function to assist in extracting a few useful SEID values.
7722 **/
7723static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
7724 struct i40e_aqc_switch_config_element_resp *ele,
7725 u16 num_reported, bool printconfig)
7726{
7727 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
7728 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
7729 u8 element_type = ele->element_type;
7730 u16 seid = le16_to_cpu(ele->seid);
7731
7732 if (printconfig)
7733 dev_info(&pf->pdev->dev,
7734 "type=%d seid=%d uplink=%d downlink=%d\n",
7735 element_type, seid, uplink_seid, downlink_seid);
7736
7737 switch (element_type) {
7738 case I40E_SWITCH_ELEMENT_TYPE_MAC:
7739 pf->mac_seid = seid;
7740 break;
7741 case I40E_SWITCH_ELEMENT_TYPE_VEB:
7742 /* Main VEB? */
7743 if (uplink_seid != pf->mac_seid)
7744 break;
7745 if (pf->lan_veb == I40E_NO_VEB) {
7746 int v;
7747
7748 /* find existing or else empty VEB */
7749 for (v = 0; v < I40E_MAX_VEB; v++) {
7750 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
7751 pf->lan_veb = v;
7752 break;
7753 }
7754 }
7755 if (pf->lan_veb == I40E_NO_VEB) {
7756 v = i40e_veb_mem_alloc(pf);
7757 if (v < 0)
7758 break;
7759 pf->lan_veb = v;
7760 }
7761 }
7762
7763 pf->veb[pf->lan_veb]->seid = seid;
7764 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
7765 pf->veb[pf->lan_veb]->pf = pf;
7766 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
7767 break;
7768 case I40E_SWITCH_ELEMENT_TYPE_VSI:
7769 if (num_reported != 1)
7770 break;
7771 /* This is immediately after a reset so we can assume this is
7772 * the PF's VSI
7773 */
7774 pf->mac_seid = uplink_seid;
7775 pf->pf_seid = downlink_seid;
7776 pf->main_vsi_seid = seid;
7777 if (printconfig)
7778 dev_info(&pf->pdev->dev,
7779 "pf_seid=%d main_vsi_seid=%d\n",
7780 pf->pf_seid, pf->main_vsi_seid);
7781 break;
7782 case I40E_SWITCH_ELEMENT_TYPE_PF:
7783 case I40E_SWITCH_ELEMENT_TYPE_VF:
7784 case I40E_SWITCH_ELEMENT_TYPE_EMP:
7785 case I40E_SWITCH_ELEMENT_TYPE_BMC:
7786 case I40E_SWITCH_ELEMENT_TYPE_PE:
7787 case I40E_SWITCH_ELEMENT_TYPE_PA:
7788 /* ignore these for now */
7789 break;
7790 default:
7791 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
7792 element_type, seid);
7793 break;
7794 }
7795}
7796
7797/**
7798 * i40e_fetch_switch_configuration - Get switch config from firmware
7799 * @pf: board private structure
7800 * @printconfig: should we print the contents
7801 *
7802 * Get the current switch configuration from the device and
7803 * extract a few useful SEID values.
7804 **/
7805int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7806{
7807 struct i40e_aqc_get_switch_config_resp *sw_config;
7808 u16 next_seid = 0;
7809 int ret = 0;
7810 u8 *aq_buf;
7811 int i;
7812
7813 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
7814 if (!aq_buf)
7815 return -ENOMEM;
7816
7817 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
7818 do {
7819 u16 num_reported, num_total;
7820
7821 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
7822 I40E_AQ_LARGE_BUF,
7823 &next_seid, NULL);
7824 if (ret) {
7825 dev_info(&pf->pdev->dev,
7826 "get switch config failed %d aq_err=%x\n",
7827 ret, pf->hw.aq.asq_last_status);
7828 kfree(aq_buf);
7829 return -ENOENT;
7830 }
7831
7832 num_reported = le16_to_cpu(sw_config->header.num_reported);
7833 num_total = le16_to_cpu(sw_config->header.num_total);
7834
7835 if (printconfig)
7836 dev_info(&pf->pdev->dev,
7837 "header: %d reported %d total\n",
7838 num_reported, num_total);
7839
7840 if (num_reported) {
7841 int sz = sizeof(*sw_config) * num_reported;
7842
7843 kfree(pf->sw_config);
7844 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7845 if (pf->sw_config)
7846 memcpy(pf->sw_config, sw_config, sz);
7847 }
7848
7849 for (i = 0; i < num_reported; i++) {
7850 struct i40e_aqc_switch_config_element_resp *ele =
7851 &sw_config->element[i];
7852
7853 i40e_setup_pf_switch_element(pf, ele, num_reported,
7854 printconfig);
7855 }
7856 } while (next_seid != 0);
7857
7858 kfree(aq_buf);
7859 return ret;
7860}
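/* (Editorial note.) The firmware returns the switch configuration in
 * chunks and hands back a continuation cookie in next_seid, so the loop
 * above re-issues the query until the cookie comes back zero. Any pager
 * over this AQ command follows the same shape (buf being the response
 * struct, len its size):
 *
 *	u16 next = 0;
 *	do {
 *		ret = i40e_aq_get_switch_config(hw, buf, len, &next, NULL);
 *		if (ret)
 *			break;
 *	} while (next != 0);
 */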
7861
7862/**
7863 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
7864 * @pf: board private structure
7865 * @reinit: if the Main VSI needs to be re-initialized.
7866 *
7867 * Returns 0 on success, negative value on failure
7868 **/
7869 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
7870 {
7871 u32 rxfc = 0, txfc = 0, rxfc_reg;
7872 int ret;
7873
7874 /* find out what's out there already */
7875 ret = i40e_fetch_switch_configuration(pf, false);
7876 if (ret) {
7877 dev_info(&pf->pdev->dev,
7878 "couldn't fetch switch config, err %d, aq_err %d\n",
7879 ret, pf->hw.aq.asq_last_status);
7880 return ret;
7881 }
7882 i40e_pf_reset_stats(pf);
7883
7884 /* first time setup */
7885 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
7886 struct i40e_vsi *vsi = NULL;
7887 u16 uplink_seid;
7888
7889 /* Set up the PF VSI associated with the PF's main VSI
7890 * that is already in the HW switch
7891 */
7892 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
7893 uplink_seid = pf->veb[pf->lan_veb]->seid;
7894 else
7895 uplink_seid = pf->mac_seid;
7896 if (pf->lan_vsi == I40E_NO_VSI)
7897 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
7898 else if (reinit)
7899 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
7900 if (!vsi) {
7901 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
7902 i40e_fdir_teardown(pf);
7903 return -EAGAIN;
7904 }
7905 } else {
7906 /* force a reset of TC and queue layout configurations */
7907 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7908 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7909 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7910 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7911 }
7912 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
7913
7914 i40e_fdir_sb_setup(pf);
7915
7916 /* Setup static PF queue filter control settings */
7917 ret = i40e_setup_pf_filter_control(pf);
7918 if (ret) {
7919 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
7920 ret);
7921 /* Failure here should not stop the remaining setup steps */
7922 }
7923
7924 /* enable RSS in the HW, even for only one queue, as the stack can use
7925 * the hash
7926 */
7927 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
7928 i40e_config_rss(pf);
7929
7930 /* fill in link information and enable LSE reporting */
7931 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
7932 i40e_link_event(pf);
7933
7934 /* Initialize user-specific link properties */
7935 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
7936 I40E_AQ_AN_COMPLETED) ? true : false);
7937 /* requested_mode is set in probe or by ethtool */
7938 if (!pf->fc_autoneg_status)
7939 goto no_autoneg;
7940
7941 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
7942 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
7943 pf->hw.fc.current_mode = I40E_FC_FULL;
7944 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
7945 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
7946 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
7947 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
7948 else
7949 pf->hw.fc.current_mode = I40E_FC_NONE;
7950
7951 /* sync the flow control settings with the auto-neg values */
7952 switch (pf->hw.fc.current_mode) {
7953 case I40E_FC_FULL:
7954 txfc = 1;
7955 rxfc = 1;
7956 break;
7957 case I40E_FC_TX_PAUSE:
7958 txfc = 1;
7959 rxfc = 0;
7960 break;
7961 case I40E_FC_RX_PAUSE:
7962 txfc = 0;
7963 rxfc = 1;
7964 break;
7965 case I40E_FC_NONE:
7966 case I40E_FC_DEFAULT:
7967 txfc = 0;
7968 rxfc = 0;
7969 break;
7970 case I40E_FC_PFC:
7971 /* TBD */
7972 break;
7973 /* no default case, we have to handle all possibilities here */
7974 }
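/* (Editorial) The mapping above as a quick truth table:
 *
 *	current_mode		txfc	rxfc
 *	I40E_FC_FULL		1	1
 *	I40E_FC_TX_PAUSE	1	0
 *	I40E_FC_RX_PAUSE	0	1
 *	NONE/DEFAULT		0	0
 *
 * txfc lands in PRTDCB_FCCFG.TFCE and rxfc in PRTDCB_MFLCN.RFCE in the
 * register writes below.
 */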
7975
7976 wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
7977
7978 rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7979 ~I40E_PRTDCB_MFLCN_RFCE_MASK;
7980 rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
7981
7982 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
7983
7984 goto fc_complete;
7985
7986no_autoneg:
7987 /* disable L2 flow control, user can turn it on if they wish */
7988 wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
7989 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7990 ~I40E_PRTDCB_MFLCN_RFCE_MASK);
7991
7992fc_complete:
7993 i40e_ptp_init(pf);
7994
7995 return ret;
7996}
7997
7998/**
7999 * i40e_determine_queue_usage - Work out queue distribution
8000 * @pf: board private structure
8001 **/
8002static void i40e_determine_queue_usage(struct i40e_pf *pf)
8003{
8004 int queues_left;
8005
8006 pf->num_lan_qps = 0;
8007
8008 /* Find the max queues to be put into basic use. We'll always be
8009 * using TC0, whether or not DCB is running, and TC0 will get the
8010 * big RSS set.
8011 */
8012 queues_left = pf->hw.func_caps.num_tx_qp;
8013
8014 if ((queues_left == 1) ||
8015 !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
8016 !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
8017 I40E_FLAG_DCB_ENABLED))) {
8018 /* one qp for PF, no queues for anything else */
8019 queues_left = 0;
8020 pf->rss_size = pf->num_lan_qps = 1;
8021
8022 /* make sure all the fancies are disabled */
8023 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8024 I40E_FLAG_FD_SB_ENABLED |
8025 I40E_FLAG_FD_ATR_ENABLED |
8026 I40E_FLAG_DCB_ENABLED |
8027 I40E_FLAG_SRIOV_ENABLED |
8028 I40E_FLAG_VMDQ_ENABLED);
8029 } else {
8030 /* Not enough queues for all TCs */
8031 if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
8032 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
8033 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8034 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
8035 }
8036 pf->num_lan_qps = pf->rss_size_max;
8037 queues_left -= pf->num_lan_qps;
8038 }
8039
8040 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8041 if (queues_left > 1) {
8042 queues_left -= 1; /* save 1 queue for FD */
8043 } else {
8044 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8045 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
8046 }
8047 }
8048
8049 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8050 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
8051 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
8052 (queues_left / pf->num_vf_qps));
8053 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
8054 }
8055
8056 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
8057 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
8058 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
8059 (queues_left / pf->num_vmdq_qps));
8060 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
8061 }
8062
8063 pf->queues_left = queues_left;
8064 return;
8065}
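/* (Editorial worked example; the numbers are hypothetical.) With 64 Tx
 * queue pairs, RSS, FD sideband and SR-IOV enabled, rss_size_max = 32,
 * 4 qps per VF and 8 requested VFs, the budget above unwinds as:
 *
 *	queues_left = 64
 *	num_lan_qps = 32		-> queues_left = 32
 *	FD sideband reserves one	-> queues_left = 31
 *	num_req_vfs = min(8, 31 / 4) = 7
 *	7 VFs * 4 qps = 28		-> queues_left = 3
 */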
8066
8067/**
8068 * i40e_setup_pf_filter_control - Setup PF static filter control
8069 * @pf: PF to be setup
8070 *
8071 * i40e_setup_pf_filter_control sets up a pf's initial filter control
8072 * settings. If PE/FCoE are enabled then it will also set the per PF
8073 * based filter sizes required for them. It also enables Flow director,
8074 * ethertype and macvlan type filter settings for the pf.
8075 *
8076 * Returns 0 on success, negative on failure
8077 **/
8078static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
8079{
8080 struct i40e_filter_control_settings *settings = &pf->filter_settings;
8081
8082 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
8083
8084 /* Flow Director is enabled */
8085 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
8086 settings->enable_fdir = true;
8087
8088 /* Ethtype and MACVLAN filters enabled for PF */
8089 settings->enable_ethtype = true;
8090 settings->enable_macvlan = true;
8091
8092 if (i40e_set_filter_control(&pf->hw, settings))
8093 return -ENOENT;
8094
8095 return 0;
8096}
8097
8098 #define INFO_STRING_LEN 255
8099static void i40e_print_features(struct i40e_pf *pf)
8100{
8101 struct i40e_hw *hw = &pf->hw;
8102 char *buf, *string;
8103
8104 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8105 if (!string) {
8106 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8107 return;
8108 }
8109
8110 buf = string;
8111
8112 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8113#ifdef CONFIG_PCI_IOV
8114 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8115#endif
8116 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8117 pf->vsi[pf->lan_vsi]->num_queue_pairs);
8118
8119 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8120 buf += sprintf(buf, "RSS ");
8121 buf += sprintf(buf, "FDir ");
8122 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8123 buf += sprintf(buf, "ATR ");
8124 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
8125 buf += sprintf(buf, "NTUPLE ");
8126 if (pf->flags & I40E_FLAG_DCB_ENABLED)
8127 buf += sprintf(buf, "DCB ");
8128 if (pf->flags & I40E_FLAG_PTP)
8129 buf += sprintf(buf, "PTP ");
8130
8131 BUG_ON(buf > (string + INFO_STRING_LEN));
8132 dev_info(&pf->pdev->dev, "%s\n", string);
8133 kfree(string);
8134}
8135
8136/**
8137 * i40e_probe - Device initialization routine
8138 * @pdev: PCI device information struct
8139 * @ent: entry in i40e_pci_tbl
8140 *
8141 * i40e_probe initializes a pf identified by a pci_dev structure.
8142 * The OS initialization, configuring of the pf private structure,
8143 * and a hardware reset occur.
8144 *
8145 * Returns 0 on success, negative on failure
8146 **/
8147static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8148{
8149 struct i40e_driver_version dv;
8150 struct i40e_pf *pf;
8151 struct i40e_hw *hw;
8152 static u16 pfs_found;
8153 u16 link_status;
8154 int err = 0;
8155 u32 len;
8156 u32 i;
8157
8158 err = pci_enable_device_mem(pdev);
8159 if (err)
8160 return err;
8161
8162 /* set up for high or low dma */
8163 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8164 if (err) {
8165 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8166 if (err) {
8167 dev_err(&pdev->dev,
8168 "DMA configuration failed: 0x%x\n", err);
8169 goto err_dma;
8170 }
8171 }
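/* (Editorial) Standard DMA-mask fallback: try a 64-bit mask first and
 * fall back to 32-bit before giving up; dma_set_mask_and_coherent()
 * sets the streaming and coherent masks in one call.
 */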
8172
8173 /* set up pci connections */
8174 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
8175 IORESOURCE_MEM), i40e_driver_name);
8176 if (err) {
8177 dev_info(&pdev->dev,
8178 "pci_request_selected_regions failed %d\n", err);
8179 goto err_pci_reg;
8180 }
8181
8182 pci_enable_pcie_error_reporting(pdev);
8183 pci_set_master(pdev);
8184
8185 /* Now that we have a PCI connection, we need to do the
8186 * low level device setup. This is primarily setting up
8187 * the Admin Queue structures and then querying for the
8188 * device's current profile information.
8189 */
8190 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
8191 if (!pf) {
8192 err = -ENOMEM;
8193 goto err_pf_alloc;
8194 }
8195 pf->next_vsi = 0;
8196 pf->pdev = pdev;
8197 set_bit(__I40E_DOWN, &pf->state);
8198
8199 hw = &pf->hw;
8200 hw->back = pf;
8201 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
8202 pci_resource_len(pdev, 0));
8203 if (!hw->hw_addr) {
8204 err = -EIO;
8205 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
8206 (unsigned int)pci_resource_start(pdev, 0),
8207 (unsigned int)pci_resource_len(pdev, 0), err);
8208 goto err_ioremap;
8209 }
8210 hw->vendor_id = pdev->vendor;
8211 hw->device_id = pdev->device;
8212 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
8213 hw->subsystem_vendor_id = pdev->subsystem_vendor;
8214 hw->subsystem_device_id = pdev->subsystem_device;
8215 hw->bus.device = PCI_SLOT(pdev->devfn);
8216 hw->bus.func = PCI_FUNC(pdev->devfn);
8217 pf->instance = pfs_found;
8218
8219 /* do a special CORER for clearing PXE mode once at init */
8220 if (hw->revision_id == 0 &&
8221 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
8222 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
8223 i40e_flush(hw);
8224 msleep(200);
8225 pf->corer_count++;
8226
8227 i40e_clear_pxe_mode(hw);
8228 }
8229
8230 /* Reset here to make sure all is clean and to define PF 'n' */
8231 err = i40e_pf_reset(hw);
8232 if (err) {
8233 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
8234 goto err_pf_reset;
8235 }
8236 pf->pfr_count++;
8237
8238 hw->aq.num_arq_entries = I40E_AQ_LEN;
8239 hw->aq.num_asq_entries = I40E_AQ_LEN;
8240 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8241 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
8242 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
8243 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
8244 "%s-pf%d:misc",
8245 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
8246
8247 err = i40e_init_shared_code(hw);
8248 if (err) {
8249 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
8250 goto err_pf_reset;
8251 }
8252
8253 /* set up a default setting for link flow control */
8254 pf->hw.fc.requested_mode = I40E_FC_NONE;
8255
8256 err = i40e_init_adminq(hw);
8257 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
8258 if (err) {
8259 dev_info(&pdev->dev,
8260 "init_adminq failed: %d expecting API %02x.%02x\n",
8261 err,
8262 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
8263 goto err_pf_reset;
8264 }
8265
8266 i40e_verify_eeprom(pf);
8267
8268 i40e_clear_pxe_mode(hw);
8269 err = i40e_get_capabilities(pf);
8270 if (err)
8271 goto err_adminq_setup;
8272
8273 err = i40e_sw_init(pf);
8274 if (err) {
8275 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
8276 goto err_sw_init;
8277 }
8278
8279 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
8280 hw->func_caps.num_rx_qp,
8281 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
8282 if (err) {
8283 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
8284 goto err_init_lan_hmc;
8285 }
8286
8287 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
8288 if (err) {
8289 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
8290 err = -ENOENT;
8291 goto err_configure_lan_hmc;
8292 }
8293
8294 i40e_get_mac_addr(hw, hw->mac.addr);
8295 if (!is_valid_ether_addr(hw->mac.addr)) {
8296 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
8297 err = -EIO;
8298 goto err_mac_addr;
8299 }
8300 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
8301 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
8302
8303 pci_set_drvdata(pdev, pf);
8304 pci_save_state(pdev);
8305#ifdef CONFIG_I40E_DCB
8306 err = i40e_init_pf_dcb(pf);
8307 if (err) {
8308 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8309 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8310 goto err_init_dcb;
8311 }
8312#endif /* CONFIG_I40E_DCB */
8313
8314 /* set up periodic task facility */
8315 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
8316 pf->service_timer_period = HZ;
8317
8318 INIT_WORK(&pf->service_task, i40e_service_task);
8319 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
8320 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
8321 pf->link_check_timeout = jiffies;
8322
8323 /* WoL defaults to disabled */
8324 pf->wol_en = false;
8325 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
8326
8327 /* set up the main switch operations */
8328 i40e_determine_queue_usage(pf);
8329 i40e_init_interrupt_scheme(pf);
8330
8331 /* Set up the *vsi struct based on the number of VSIs in the HW,
8332 * and set up our local tracking of the MAIN PF vsi.
8333 */
8334 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
8335 pf->vsi = kzalloc(len, GFP_KERNEL);
8336 if (!pf->vsi) {
8337 err = -ENOMEM;
8338 goto err_switch_setup;
8339 }
8340
8341 err = i40e_setup_pf_switch(pf, false);
8342 if (err) {
8343 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8344 goto err_vsis;
8345 }
8346 /* if FDIR VSI was set up, start it now */
8347 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
8348 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
8349 i40e_vsi_open(pf->vsi[i]);
8350 break;
8351 }
8352 }
8353
8354 /* The main driver is (mostly) up and happy. We need to set this state
8355 * before setting up the misc vector or we get a race and the vector
8356 * ends up disabled forever.
8357 */
8358 clear_bit(__I40E_DOWN, &pf->state);
8359
8360 /* In case of MSIX we are going to setup the misc vector right here
8361 * to handle admin queue events etc. In case of legacy and MSI
8362 * the misc functionality and queue processing is combined in
8363 * the same vector and that gets setup at open.
8364 */
8365 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8366 err = i40e_setup_misc_vector(pf);
8367 if (err) {
8368 dev_info(&pdev->dev,
8369 "setup of misc vector failed: %d\n", err);
8370 goto err_vsis;
8371 }
8372 }
8373
8374 /* prep for VF support */
8375 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8376 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
8377 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
8378 u32 val;
8379
8380 /* disable link interrupts for VFs */
8381 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
8382 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8383 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8384 i40e_flush(hw);
8385
8386 if (pci_num_vf(pdev)) {
8387 dev_info(&pdev->dev,
8388 "Active VFs found, allocating resources.\n");
8389 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
8390 if (err)
8391 dev_info(&pdev->dev,
8392 "Error %d allocating resources for existing VFs\n",
8393 err);
8394 }
8395 }
8396
8397 pfs_found++;
8398
8399 i40e_dbg_pf_init(pf);
8400
8401 /* tell the firmware that we're starting */
8402 dv.major_version = DRV_VERSION_MAJOR;
8403 dv.minor_version = DRV_VERSION_MINOR;
8404 dv.build_version = DRV_VERSION_BUILD;
8405 dv.subbuild_version = 0;
8406 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
8407
8408 /* since everything's happy, start the service_task timer */
8409 mod_timer(&pf->service_timer,
8410 round_jiffies(jiffies + pf->service_timer_period));
8411
8412 /* Get the negotiated link width and speed from PCI config space */
8413 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
8414
8415 i40e_set_pci_config_data(hw, link_status);
8416
8417 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
8418 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8419 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8420 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
8421 "Unknown"),
8422 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
8423 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
8424 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
8425 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
8426 "Unknown"));
8427
8428 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
8429 hw->bus.speed < i40e_bus_speed_8000) {
8430 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
8431 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8432 }
8433
8434 /* print a string summarizing features */
8435 i40e_print_features(pf);
8436
8437 return 0;
8438
8439 /* Unwind what we've done if something failed in the setup */
8440err_vsis:
8441 set_bit(__I40E_DOWN, &pf->state);
8442 i40e_clear_interrupt_scheme(pf);
8443 kfree(pf->vsi);
8444err_switch_setup:
8445 i40e_reset_interrupt_capability(pf);
8446 del_timer_sync(&pf->service_timer);
8447#ifdef CONFIG_I40E_DCB
8448err_init_dcb:
8449#endif /* CONFIG_I40E_DCB */
8450err_mac_addr:
8451err_configure_lan_hmc:
8452 (void)i40e_shutdown_lan_hmc(hw);
8453err_init_lan_hmc:
8454 kfree(pf->qp_pile);
8455 kfree(pf->irq_pile);
8456err_sw_init:
8457err_adminq_setup:
8458 (void)i40e_shutdown_adminq(hw);
8459err_pf_reset:
8460 iounmap(hw->hw_addr);
8461err_ioremap:
8462 kfree(pf);
8463err_pf_alloc:
8464 pci_disable_pcie_error_reporting(pdev);
8465 pci_release_selected_regions(pdev,
8466 pci_select_bars(pdev, IORESOURCE_MEM));
8467err_pci_reg:
8468err_dma:
8469 pci_disable_device(pdev);
8470 return err;
8471}
8472
8473/**
8474 * i40e_remove - Device removal routine
8475 * @pdev: PCI device information struct
8476 *
8477 * i40e_remove is called by the PCI subsystem to alert the driver
8478 * that it should release a PCI device. This could be caused by a
8479 * Hot-Plug event, or because the driver is going to be removed from
8480 * memory.
8481 **/
8482static void i40e_remove(struct pci_dev *pdev)
8483{
8484 struct i40e_pf *pf = pci_get_drvdata(pdev);
8485 i40e_status ret_code;
8486 u32 reg;
8487 int i;
8488
8489 i40e_dbg_pf_exit(pf);
8490
8491 i40e_ptp_stop(pf);
8492
8493 /* no more scheduling of any task */
8494 set_bit(__I40E_DOWN, &pf->state);
8495 del_timer_sync(&pf->service_timer);
8496 cancel_work_sync(&pf->service_task);
8497
8498 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8499 i40e_free_vfs(pf);
8500 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8501 }
8502
8503 i40e_fdir_teardown(pf);
8504
8505 /* If there is a switch structure or any orphans, remove them.
8506 * This will leave only the PF's VSI remaining.
8507 */
8508 for (i = 0; i < I40E_MAX_VEB; i++) {
8509 if (!pf->veb[i])
8510 continue;
8511
8512 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
8513 pf->veb[i]->uplink_seid == 0)
8514 i40e_switch_branch_release(pf->veb[i]);
8515 }
8516
8517 /* Now we can shutdown the PF's VSI, just before we kill
8518 * adminq and hmc.
8519 */
8520 if (pf->vsi[pf->lan_vsi])
8521 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
8522
8523 i40e_stop_misc_vector(pf);
8524 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8525 synchronize_irq(pf->msix_entries[0].vector);
8526 free_irq(pf->msix_entries[0].vector, pf);
8527 }
8528
8529 /* shutdown and destroy the HMC */
8530 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
8531 if (ret_code)
8532 dev_warn(&pdev->dev,
8533 "Failed to destroy the HMC resources: %d\n", ret_code);
8534
8535 /* shutdown the adminq */
8536 ret_code = i40e_shutdown_adminq(&pf->hw);
8537 if (ret_code)
8538 dev_warn(&pdev->dev,
8539 "Failed to destroy the Admin Queue resources: %d\n",
8540 ret_code);
8541
8542 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
8543 i40e_clear_interrupt_scheme(pf);
8544 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
8545 if (pf->vsi[i]) {
8546 i40e_vsi_clear_rings(pf->vsi[i]);
8547 i40e_vsi_clear(pf->vsi[i]);
8548 pf->vsi[i] = NULL;
8549 }
8550 }
8551
8552 for (i = 0; i < I40E_MAX_VEB; i++) {
8553 kfree(pf->veb[i]);
8554 pf->veb[i] = NULL;
8555 }
8556
8557 kfree(pf->qp_pile);
8558 kfree(pf->irq_pile);
8559 kfree(pf->sw_config);
8560 kfree(pf->vsi);
8561
8562 /* force a PF reset to clean anything leftover */
8563 reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
8564 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
8565 i40e_flush(&pf->hw);
8566
8567 iounmap(pf->hw.hw_addr);
8568 kfree(pf);
8569 pci_release_selected_regions(pdev,
8570 pci_select_bars(pdev, IORESOURCE_MEM));
8571
8572 pci_disable_pcie_error_reporting(pdev);
8573 pci_disable_device(pdev);
8574}
8575
8576/**
8577 * i40e_pci_error_detected - warning that something funky happened in PCI land
8578 * @pdev: PCI device information struct
8579 *
8580 * Called to warn that something happened and the error handling steps
8581 * are in progress. Allows the driver to quiesce things and be ready
8582 * for remediation.
8583 **/
8584static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
8585 enum pci_channel_state error)
8586{
8587 struct i40e_pf *pf = pci_get_drvdata(pdev);
8588
8589 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
8590
8591 /* shutdown all operations */
8592 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
8593 rtnl_lock();
8594 i40e_prep_for_reset(pf);
8595 rtnl_unlock();
8596 }
8597
8598 /* Request a slot reset */
8599 return PCI_ERS_RESULT_NEED_RESET;
8600}
8601
8602/**
8603 * i40e_pci_error_slot_reset - a PCI slot reset just happened
8604 * @pdev: PCI device information struct
8605 *
8606 * Called to find if the driver can work with the device now that
8607 * the pci slot has been reset. If a basic connection seems good
8608 * (registers are readable and have sane content) then return a
8609 * happy little PCI_ERS_RESULT_xxx.
8610 **/
8611static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
8612{
8613 struct i40e_pf *pf = pci_get_drvdata(pdev);
8614 pci_ers_result_t result;
8615 int err;
8616 u32 reg;
8617
8618 dev_info(&pdev->dev, "%s\n", __func__);
8619 if (pci_enable_device_mem(pdev)) {
8620 dev_info(&pdev->dev,
8621 "Cannot re-enable PCI device after reset.\n");
8622 result = PCI_ERS_RESULT_DISCONNECT;
8623 } else {
8624 pci_set_master(pdev);
8625 pci_restore_state(pdev);
8626 pci_save_state(pdev);
8627 pci_wake_from_d3(pdev, false);
8628
8629 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8630 if (reg == 0)
8631 result = PCI_ERS_RESULT_RECOVERED;
8632 else
8633 result = PCI_ERS_RESULT_DISCONNECT;
8634 }
8635
8636 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8637 if (err) {
8638 dev_info(&pdev->dev,
8639 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8640 err);
8641 /* non-fatal, continue */
8642 }
8643
8644 return result;
8645}
8646
8647/**
8648 * i40e_pci_error_resume - restart operations after PCI error recovery
8649 * @pdev: PCI device information struct
8650 *
8651 * Called to allow the driver to bring things back up after PCI error
8652 * and/or reset recovery has finished.
8653 **/
8654static void i40e_pci_error_resume(struct pci_dev *pdev)
8655{
8656 struct i40e_pf *pf = pci_get_drvdata(pdev);
8657
8658 dev_info(&pdev->dev, "%s\n", __func__);
8659 if (test_bit(__I40E_SUSPENDED, &pf->state))
8660 return;
8661
8662 rtnl_lock();
8663 i40e_handle_reset_warning(pf);
8664 rtnl_unlock();
8665}
8666
8667/**
8668 * i40e_shutdown - PCI callback for shutting down
8669 * @pdev: PCI device information struct
8670 **/
8671static void i40e_shutdown(struct pci_dev *pdev)
8672{
8673 struct i40e_pf *pf = pci_get_drvdata(pdev);
8674 struct i40e_hw *hw = &pf->hw;
8675
8676 set_bit(__I40E_SUSPENDED, &pf->state);
8677 set_bit(__I40E_DOWN, &pf->state);
8678 rtnl_lock();
8679 i40e_prep_for_reset(pf);
8680 rtnl_unlock();
8681
8682 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8683 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8684
8685 if (system_state == SYSTEM_POWER_OFF) {
8686 pci_wake_from_d3(pdev, pf->wol_en);
8687 pci_set_power_state(pdev, PCI_D3hot);
8688 }
8689}
8690
8691#ifdef CONFIG_PM
8692/**
8693 * i40e_suspend - PCI callback for moving to D3
8694 * @pdev: PCI device information struct
8695 **/
8696static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
8697{
8698 struct i40e_pf *pf = pci_get_drvdata(pdev);
8699 struct i40e_hw *hw = &pf->hw;
8700
8701 set_bit(__I40E_SUSPENDED, &pf->state);
8702 set_bit(__I40E_DOWN, &pf->state);
8703 rtnl_lock();
8704 i40e_prep_for_reset(pf);
8705 rtnl_unlock();
8706
8707 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
8708 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
8709
8710 pci_wake_from_d3(pdev, pf->wol_en);
8711 pci_set_power_state(pdev, PCI_D3hot);
8712
8713 return 0;
8714}
8715
8716/**
8717 * i40e_resume - PCI callback for waking up from D3
8718 * @pdev: PCI device information struct
8719 **/
8720static int i40e_resume(struct pci_dev *pdev)
8721{
8722 struct i40e_pf *pf = pci_get_drvdata(pdev);
8723 u32 err;
8724
8725 pci_set_power_state(pdev, PCI_D0);
8726 pci_restore_state(pdev);
8727 * pci_restore_state() clears dev->state_saved, so
8728 * call pci_save_state() again to restore it.
8729 */
8730 pci_save_state(pdev);
8731
8732 err = pci_enable_device_mem(pdev);
8733 if (err) {
8734 dev_err(&pdev->dev,
8735 "%s: Cannot enable PCI device from suspend\n",
8736 __func__);
8737 return err;
8738 }
8739 pci_set_master(pdev);
8740
8741 /* no wakeup events while running */
8742 pci_wake_from_d3(pdev, false);
8743
8744 /* handling the reset will rebuild the device state */
8745 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
8746 clear_bit(__I40E_DOWN, &pf->state);
8747 rtnl_lock();
8748 i40e_reset_and_rebuild(pf, false);
8749 rtnl_unlock();
8750 }
8751
8752 return 0;
8753}
8754
8755#endif
8756static const struct pci_error_handlers i40e_err_handler = {
8757 .error_detected = i40e_pci_error_detected,
8758 .slot_reset = i40e_pci_error_slot_reset,
8759 .resume = i40e_pci_error_resume,
8760};
8761
8762static struct pci_driver i40e_driver = {
8763 .name = i40e_driver_name,
8764 .id_table = i40e_pci_tbl,
8765 .probe = i40e_probe,
8766 .remove = i40e_remove,
8767#ifdef CONFIG_PM
8768 .suspend = i40e_suspend,
8769 .resume = i40e_resume,
8770#endif
8771 .shutdown = i40e_shutdown,
8772 .err_handler = &i40e_err_handler,
8773 .sriov_configure = i40e_pci_sriov_configure,
8774};
8775
8776/**
8777 * i40e_init_module - Driver registration routine
8778 *
8779 * i40e_init_module is the first routine called when the driver is
8780 * loaded. All it does is register with the PCI subsystem.
8781 **/
8782static int __init i40e_init_module(void)
8783{
8784 pr_info("%s: %s - version %s\n", i40e_driver_name,
8785 i40e_driver_string, i40e_driver_version_str);
8786 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
8787 i40e_dbg_init();
8788 return pci_register_driver(&i40e_driver);
8789}
8790module_init(i40e_init_module);
8791
8792/**
8793 * i40e_exit_module - Driver exit cleanup routine
8794 *
8795 * i40e_exit_module is called just before the driver is removed
8796 * from memory.
8797 **/
8798static void __exit i40e_exit_module(void)
8799{
8800 pci_unregister_driver(&i40e_driver);
8801 i40e_dbg_exit();
8802}
8803module_exit(i40e_exit_module);