i40e: Fix the Tx ring qset handle when DCB reconfigures
drivers/net/ethernet/intel/i40e/i40e_main.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
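
/* Illustrative sketch (not driver code): the shared code normally reaches
 * these helpers through thin wrapper macros rather than calling them
 * directly; a direct caller would pair them like this. The 4096-byte
 * size/alignment values are arbitrary example numbers.
 *
 *	struct i40e_dma_mem mem;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096))
 *		return -ENOMEM;
 *	// ... use mem.va (CPU address) and mem.pa (DMA address) ...
 *	i40e_free_dma_mem_d(hw, &mem);
 */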

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}
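
/* Usage sketch (illustrative; it mirrors how callers elsewhere in this
 * file carve a VSI's queue pairs out of pf->qp_pile):
 *
 *	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 *	if (ret < 0)
 *		return ret;
 *	vsi->base_queue = ret;
 *
 * The matching release is i40e_put_lump(pf->qp_pile, vsi->base_queue,
 * vsi->idx), defined just below.
 */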

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
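
/* Worked example (illustrative numbers): if the saved offset is
 * 0xFFFFFFFFFFF0 and the 48-bit counter has since wrapped so that the
 * raw read is 0x10, the else branch computes
 * (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since
 * the offset was captured, which the final mask keeps within 48 bits.
 */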

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the pf statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_discards,
			   &nsd->eth.tx_discards);

	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* A VSI is out of vlan mode only when every filter has
	 * vlan == -1 (I40E_VLAN_ANY), so we have to walk the whole
	 * list to be sure.
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status aq_ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (aq_ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
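
/* Usage sketch (illustrative; it mirrors the add/del pairing that
 * i40e_set_mac() below performs when replacing a netdev MAC address):
 * drop the old filter, add the new one, then push the change out.
 *
 *	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
 *	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
 *	i40e_sync_vsi_filters(vsi);
 */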

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the power-of-2 of the number of queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
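
/* Worked example (illustrative numbers): for a TC at queue offset 8 with
 * qcount = 4, the power-of-2 loop above ends with pow = 2 (1 << 2 == 4),
 * so the per-TC mapping written to tc_mapping[] is
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the TC owns the contiguous block of queue pairs 8 through 11.
 */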

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
1680
1681/**
1682 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1683 * @vsi: ptr to the VSI
1684 *
1685 * Push any outstanding VSI filter changes through the AdminQ.
1686 *
1687 * Returns 0 or error value
1688 **/
1689int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1690{
1691 struct i40e_mac_filter *f, *ftmp;
1692 bool promisc_forced_on = false;
1693 bool add_happened = false;
1694 int filter_list_len = 0;
1695 u32 changed_flags = 0;
dcae29be 1696 i40e_status aq_ret = 0;
41c445ff
JB
1697 struct i40e_pf *pf;
1698 int num_add = 0;
1699 int num_del = 0;
1700 u16 cmd_flags;
1701
1702 /* empty array typed pointers, kcalloc later */
1703 struct i40e_aqc_add_macvlan_element_data *add_list;
1704 struct i40e_aqc_remove_macvlan_element_data *del_list;
1705
1706 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1707 usleep_range(1000, 2000);
1708 pf = vsi->back;
1709
1710 if (vsi->netdev) {
1711 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1712 vsi->current_netdev_flags = vsi->netdev->flags;
1713 }
1714
1715 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1716 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1717
1718 filter_list_len = pf->hw.aq.asq_buf_size /
1719 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1720 del_list = kcalloc(filter_list_len,
1721 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1722 GFP_KERNEL);
1723 if (!del_list)
1724 return -ENOMEM;
1725
1726 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1727 if (!f->changed)
1728 continue;
1729
1730 if (f->counter != 0)
1731 continue;
1732 f->changed = false;
1733 cmd_flags = 0;
1734
1735 /* add to delete list */
1736 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1737 del_list[num_del].vlan_tag =
1738 cpu_to_le16((u16)(f->vlan ==
1739 I40E_VLAN_ANY ? 0 : f->vlan));
1740
1741 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1742 del_list[num_del].flags = cmd_flags;
1743 num_del++;
1744
1745 /* unlink from filter list */
1746 list_del(&f->list);
1747 kfree(f);
1748
1749 /* flush a full buffer */
1750 if (num_del == filter_list_len) {
1751 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1752 vsi->seid, del_list, num_del,
1753 NULL);
1754 num_del = 0;
1755 memset(del_list, 0, filter_list_len * sizeof(*del_list));
1756
1757 if (aq_ret &&
1758 pf->hw.aq.asq_last_status !=
1759 I40E_AQ_RC_ENOENT)
1760 dev_info(&pf->pdev->dev,
1761 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1762 aq_ret,
1763 pf->hw.aq.asq_last_status);
1764 }
1765 }
1766 if (num_del) {
1767 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1768 del_list, num_del, NULL);
1769 num_del = 0;
1770
1771 if (aq_ret &&
1772 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1773 dev_info(&pf->pdev->dev,
1774 "ignoring delete macvlan error, err %d, aq_err %d\n",
1775 aq_ret, pf->hw.aq.asq_last_status);
1776 }
1777
1778 kfree(del_list);
1779 del_list = NULL;
1780
1781 /* do all the adds now */
1782 filter_list_len = pf->hw.aq.asq_buf_size /
1783 sizeof(struct i40e_aqc_add_macvlan_element_data);
1784 add_list = kcalloc(filter_list_len,
1785 sizeof(struct i40e_aqc_add_macvlan_element_data),
1786 GFP_KERNEL);
1787 if (!add_list)
1788 return -ENOMEM;
1789
1790 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1791 if (!f->changed)
1792 continue;
1793
1794 if (f->counter == 0)
1795 continue;
1796 f->changed = false;
1797 add_happened = true;
1798 cmd_flags = 0;
1799
1800 /* add to add array */
1801 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1802 add_list[num_add].vlan_tag =
1803 cpu_to_le16(
1804 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1805 add_list[num_add].queue_number = 0;
1806
1807 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1808 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1809 num_add++;
1810
1811 /* flush a full buffer */
1812 if (num_add == filter_list_len) {
1813 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1814 add_list, num_add,
1815 NULL);
1816 num_add = 0;
1817
1818 if (aq_ret)
1819 break;
1820 memset(add_list, 0, filter_list_len * sizeof(*add_list));
1821 }
1822 }
1823 if (num_add) {
1824 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1825 add_list, num_add, NULL);
1826 num_add = 0;
1827 }
1828 kfree(add_list);
1829 add_list = NULL;
1830
1831 if (add_happened && aq_ret &&
1832 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1833 dev_info(&pf->pdev->dev,
1834 "add filter failed, err %d, aq_err %d\n",
1835 aq_ret, pf->hw.aq.asq_last_status);
1836 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1837 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1838 &vsi->state)) {
1839 promisc_forced_on = true;
1840 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1841 &vsi->state);
1842 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1843 }
1844 }
1845 }
1846
1847 /* check for changes in promiscuous modes */
1848 if (changed_flags & IFF_ALLMULTI) {
1849 bool cur_multipromisc;
1850 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1851 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1852 vsi->seid,
1853 cur_multipromisc,
1854 NULL);
1855 if (aq_ret)
1856 dev_info(&pf->pdev->dev,
1857 "set multi promisc failed, err %d, aq_err %d\n",
1858 aq_ret, pf->hw.aq.asq_last_status);
1859 }
1860 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1861 bool cur_promisc;
1862 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1863 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1864 &vsi->state));
1865 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1866 vsi->seid,
1867 cur_promisc, NULL);
1868 if (aq_ret)
1869 dev_info(&pf->pdev->dev,
1870 "set uni promisc failed, err %d, aq_err %d\n",
1871 aq_ret, pf->hw.aq.asq_last_status);
1872 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1873 vsi->seid,
1874 cur_promisc, NULL);
1875 if (aq_ret)
1876 dev_info(&pf->pdev->dev,
1877 "set brdcast promisc failed, err %d, aq_err %d\n",
1878 aq_ret, pf->hw.aq.asq_last_status);
1879 }
1880
1881 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1882 return 0;
1883}
1884
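/* Typical call chain, as visible in this file (editor's note):
 * .ndo_set_rx_mode -> i40e_set_rx_mode() sets
 * I40E_VSI_FLAG_FILTER_CHANGED and I40E_FLAG_FILTER_SYNC, and the
 * periodic service task then calls i40e_sync_filters_subtask(),
 * which invokes i40e_sync_vsi_filters() for each flagged VSI.
 */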
1885/**
1886 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1887 * @pf: board private structure
1888 **/
1889static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1890{
1891 int v;
1892
1893 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1894 return;
1895 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1896
1897 for (v = 0; v < pf->num_alloc_vsi; v++) {
1898 if (pf->vsi[v] &&
1899 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1900 i40e_sync_vsi_filters(pf->vsi[v]);
1901 }
1902}
1903
1904/**
1905 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1906 * @netdev: network interface device structure
1907 * @new_mtu: new value for maximum frame size
1908 *
1909 * Returns 0 on success, negative on failure
1910 **/
1911static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1912{
1913 struct i40e_netdev_priv *np = netdev_priv(netdev);
1914 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1915 struct i40e_vsi *vsi = np->vsi;
1916
1917 /* MTU < 68 is an error and causes problems on some kernels */
1918 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1919 return -EINVAL;
1920
1921 netdev_info(netdev, "changing MTU from %d to %d\n",
1922 netdev->mtu, new_mtu);
1923 netdev->mtu = new_mtu;
1924 if (netif_running(netdev))
1925 i40e_vsi_reinit_locked(vsi);
1926
1927 return 0;
1928}
1929
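/* Worked example (editor's note): with new_mtu = 1500 the check above
 * computes max_frame = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) +
 * VLAN_HLEN(4) = 1522 bytes, which must not exceed I40E_MAX_RXBUFFER
 * for the new MTU to be accepted.
 */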
1930/**
1931 * i40e_ioctl - Access the hwtstamp interface
1932 * @netdev: network interface device structure
1933 * @ifr: interface request data
1934 * @cmd: ioctl command
1935 **/
1936int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1937{
1938 struct i40e_netdev_priv *np = netdev_priv(netdev);
1939 struct i40e_pf *pf = np->vsi->back;
1940
1941 switch (cmd) {
1942 case SIOCGHWTSTAMP:
1943 return i40e_ptp_get_ts_config(pf, ifr);
1944 case SIOCSHWTSTAMP:
1945 return i40e_ptp_set_ts_config(pf, ifr);
1946 default:
1947 return -EOPNOTSUPP;
1948 }
1949}
1950
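/* Userspace sketch (illustrative, using the standard kernel hwtstamp
 * UAPI rather than anything i40e-specific):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Such a request lands in i40e_ptp_set_ts_config() via the switch
 * above.
 */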
1951/**
1952 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1953 * @vsi: the vsi being adjusted
1954 **/
1955void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1956{
1957 struct i40e_vsi_context ctxt;
1958 i40e_status ret;
1959
1960 if ((vsi->info.valid_sections &
1961 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1962 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1963 return; /* already enabled */
1964
1965 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1966 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1967 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1968
1969 ctxt.seid = vsi->seid;
1970 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1971 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1972 if (ret) {
1973 dev_info(&vsi->back->pdev->dev,
1974 "%s: update vsi failed, aq_err=%d\n",
1975 __func__, vsi->back->hw.aq.asq_last_status);
1976 }
1977}
1978
1979/**
1980 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1981 * @vsi: the vsi being adjusted
1982 **/
1983void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1984{
1985 struct i40e_vsi_context ctxt;
1986 i40e_status ret;
1987
1988 if ((vsi->info.valid_sections &
1989 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1990 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1991 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1992 return; /* already disabled */
1993
1994 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1995 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1996 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1997
1998 ctxt.seid = vsi->seid;
1999 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2000 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2001 if (ret) {
2002 dev_info(&vsi->back->pdev->dev,
2003 "%s: update vsi failed, aq_err=%d\n",
2004 __func__, vsi->back->hw.aq.asq_last_status);
2005 }
2006}
2007
2008/**
2009 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2010 * @netdev: network interface to be adjusted
2011 * @features: netdev features to test if VLAN offload is enabled or not
2012 **/
2013static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2014{
2015 struct i40e_netdev_priv *np = netdev_priv(netdev);
2016 struct i40e_vsi *vsi = np->vsi;
2017
2018 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2019 i40e_vlan_stripping_enable(vsi);
2020 else
2021 i40e_vlan_stripping_disable(vsi);
2022}
2023
2024/**
2025 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2026 * @vsi: the vsi being configured
2027 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2028 **/
2029int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2030{
2031 struct i40e_mac_filter *f, *add_f;
2032 bool is_netdev, is_vf;
2033
2034 is_vf = (vsi->type == I40E_VSI_SRIOV);
2035 is_netdev = !!(vsi->netdev);
2036
2037 if (is_netdev) {
2038 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2039 is_vf, is_netdev);
2040 if (!add_f) {
2041 dev_info(&vsi->back->pdev->dev,
2042 "Could not add vlan filter %d for %pM\n",
2043 vid, vsi->netdev->dev_addr);
2044 return -ENOMEM;
2045 }
2046 }
2047
2048 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2049 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2050 if (!add_f) {
2051 dev_info(&vsi->back->pdev->dev,
2052 "Could not add vlan filter %d for %pM\n",
2053 vid, f->macaddr);
2054 return -ENOMEM;
2055 }
2056 }
2057
2058 /* Now if we add a vlan tag, make sure to check if it is the first
2059 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2060 * with 0, so we now accept untagged and specified tagged traffic
2061 * (and not any tagged and untagged)
2062 */
2063 if (vid > 0) {
2064 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2065 I40E_VLAN_ANY,
2066 is_vf, is_netdev)) {
2067 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2068 I40E_VLAN_ANY, is_vf, is_netdev);
2069 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2070 is_vf, is_netdev);
2071 if (!add_f) {
2072 dev_info(&vsi->back->pdev->dev,
2073 "Could not add filter 0 for %pM\n",
2074 vsi->netdev->dev_addr);
2075 return -ENOMEM;
2076 }
2077 }
2078 }
2079
2080 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2081 if (vid > 0 && !vsi->info.pvid) {
2082 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2083 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2084 is_vf, is_netdev)) {
2085 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2086 is_vf, is_netdev);
2087 add_f = i40e_add_filter(vsi, f->macaddr,
2088 0, is_vf, is_netdev);
2089 if (!add_f) {
2090 dev_info(&vsi->back->pdev->dev,
2091 "Could not add filter 0 for %pM\n",
2092 f->macaddr);
2093 return -ENOMEM;
2094 }
2095 }
2096 }
2097 }
2098
2099 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2100 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2101 return 0;
2102
2103 return i40e_sync_vsi_filters(vsi);
2104}
2105
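/* Worked example (editor's note): on a VSI whose filters are all on
 * the I40E_VLAN_ANY (-1) pseudo-VLAN, i40e_vsi_add_vlan(vsi, 100)
 * first adds vid-100 filters, then replaces each remaining -1 filter
 * with a vid-0 one, so the VSI ends up accepting untagged frames plus
 * frames tagged with VLAN 100, rather than every tag as before.
 */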
2106/**
2107 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2108 * @vsi: the vsi being configured
2109 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2110 *
2111 * Return: 0 on success or negative otherwise
2112 **/
2113int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2114{
2115 struct net_device *netdev = vsi->netdev;
2116 struct i40e_mac_filter *f, *add_f;
2117 bool is_vf, is_netdev;
2118 int filter_count = 0;
2119
2120 is_vf = (vsi->type == I40E_VSI_SRIOV);
2121 is_netdev = !!(netdev);
2122
2123 if (is_netdev)
2124 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2125
2126 list_for_each_entry(f, &vsi->mac_filter_list, list)
2127 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2128
2129 /* go through all the filters for this VSI and if there is only
2130 * vid == 0 it means there are no other filters, so vid 0 must
2131 * be replaced with -1. This signifies that we should from now
2132 * on accept any traffic (with any tag present, or untagged)
2133 */
2134 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2135 if (is_netdev) {
2136 if (f->vlan &&
2137 ether_addr_equal(netdev->dev_addr, f->macaddr))
2138 filter_count++;
2139 }
2140
2141 if (f->vlan)
2142 filter_count++;
2143 }
2144
2145 if (!filter_count && is_netdev) {
2146 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2147 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2148 is_vf, is_netdev);
2149 if (!f) {
2150 dev_info(&vsi->back->pdev->dev,
2151 "Could not add filter %d for %pM\n",
2152 I40E_VLAN_ANY, netdev->dev_addr);
2153 return -ENOMEM;
2154 }
2155 }
2156
2157 if (!filter_count) {
2158 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2159 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2160 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2161 is_vf, is_netdev);
2162 if (!add_f) {
2163 dev_info(&vsi->back->pdev->dev,
2164 "Could not add filter %d for %pM\n",
2165 I40E_VLAN_ANY, f->macaddr);
2166 return -ENOMEM;
2167 }
2168 }
2169 }
2170
2171 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2172 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2173 return 0;
2174
2175 return i40e_sync_vsi_filters(vsi);
2176}
2177
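/* Worked example (editor's note): continuing the case above, calling
 * i40e_vsi_kill_vlan(vsi, 100) drops the vid-100 filters; since no
 * tagged filters then remain (filter_count stays 0), each vid-0
 * filter is promoted back to I40E_VLAN_ANY, restoring the
 * "accept any tag or none" behavior.
 */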
2178/**
2179 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2180 * @netdev: network interface to be adjusted
2181 * @vid: vlan id to be added
2182 *
2183 * net_device_ops implementation for adding vlan ids
2184 **/
2185#ifdef I40E_FCOE
2186int i40e_vlan_rx_add_vid(struct net_device *netdev,
2187 __always_unused __be16 proto, u16 vid)
2188#else
2189static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2190 __always_unused __be16 proto, u16 vid)
2191#endif
2192{
2193 struct i40e_netdev_priv *np = netdev_priv(netdev);
2194 struct i40e_vsi *vsi = np->vsi;
2195 int ret = 0;
2196
2197 if (vid > 4095)
2198 return -EINVAL;
2199
2200 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2201
2202 /* If the network stack called us with vid = 0 then
2203 * it is asking to receive priority tagged packets with
2204 * vlan id 0. Our HW receives them by default when configured
2205 * to receive untagged packets so there is no need to add an
2206 * extra filter for vlan 0 tagged packets.
2207 */
2208 if (vid)
2209 ret = i40e_vsi_add_vlan(vsi, vid);
2210
2211 if (!ret && (vid < VLAN_N_VID))
2212 set_bit(vid, vsi->active_vlans);
2213
2214 return ret;
2215}
2216
2217/**
2218 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2219 * @netdev: network interface to be adjusted
2220 * @vid: vlan id to be removed
2221 *
2222 * net_device_ops implementation for removing vlan ids
2223 **/
2224#ifdef I40E_FCOE
2225int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2226 __always_unused __be16 proto, u16 vid)
2227#else
2228static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2229 __always_unused __be16 proto, u16 vid)
2230#endif
2231{
2232 struct i40e_netdev_priv *np = netdev_priv(netdev);
2233 struct i40e_vsi *vsi = np->vsi;
2234
2235 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2236
2237 /* return code is ignored as there is nothing a user
2238 * can do about failure to remove and a log message was
2239 * already printed from the other function
2240 */
2241 i40e_vsi_kill_vlan(vsi, vid);
2242
2243 clear_bit(vid, vsi->active_vlans);
2244
2245 return 0;
2246}
2247
2248/**
2249 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2250 * @vsi: the vsi being brought back up
2251 **/
2252static void i40e_restore_vlan(struct i40e_vsi *vsi)
2253{
2254 u16 vid;
2255
2256 if (!vsi->netdev)
2257 return;
2258
2259 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2260
2261 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2262 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2263 vid);
2264}
2265
2266/**
2267 * i40e_vsi_add_pvid - Add pvid for the VSI
2268 * @vsi: the vsi being adjusted
2269 * @vid: the vlan id to set as a PVID
2270 **/
2271int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2272{
2273 struct i40e_vsi_context ctxt;
2274 i40e_status aq_ret;
2275
2276 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2277 vsi->info.pvid = cpu_to_le16(vid);
2278 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2279 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2280 I40E_AQ_VSI_PVLAN_EMOD_STR;
2281
2282 ctxt.seid = vsi->seid;
2283 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2284 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2285 if (aq_ret) {
2286 dev_info(&vsi->back->pdev->dev,
2287 "%s: update vsi failed, aq_err=%d\n",
2288 __func__, vsi->back->hw.aq.asq_last_status);
2289 return -ENOENT;
2290 }
2291
2292 return 0;
2293}
2294
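/* Example (editor's note): i40e_vsi_add_pvid(vsi, 200) programs the
 * VSI like a switch access port for VLAN 200: MODE_TAGGED plus
 * INSERT_PVID make the HW tag egress frames with the PVID, and
 * EMOD_STR strips the tag on receive before handing frames up.
 */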
2295/**
2296 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2297 * @vsi: the vsi being adjusted
2298 *
2299 * Just use i40e_vlan_stripping_disable() to put it back to normal
2300 **/
2301void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2302{
2303 i40e_vlan_stripping_disable(vsi);
2304
2305 vsi->info.pvid = 0;
2306}
2307
2308/**
2309 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2310 * @vsi: ptr to the VSI
2311 *
2312 * If this function returns with an error, then it's possible one or
2313 * more of the rings is populated (while the rest are not). It is the
2314 * caller's duty to clean those orphaned rings.
2315 *
2316 * Return 0 on success, negative on failure
2317 **/
2318static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2319{
2320 int i, err = 0;
2321
2322 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2323 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2324
2325 return err;
2326}
2327
2328/**
2329 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2330 * @vsi: ptr to the VSI
2331 *
2332 * Free VSI's transmit software resources
2333 **/
2334static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2335{
2336 int i;
2337
2338 if (!vsi->tx_rings)
2339 return;
2340
2341 for (i = 0; i < vsi->num_queue_pairs; i++)
2342 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2343 i40e_free_tx_resources(vsi->tx_rings[i]);
2344}
2345
2346/**
2347 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
2348 * @vsi: ptr to the VSI
2349 *
2350 * If this function returns with an error, then it's possible one or
2351 * more of the rings is populated (while the rest are not). It is the
2352 * caller's duty to clean those orphaned rings.
2353 *
2354 * Return 0 on success, negative on failure
2355 **/
2356static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2357{
2358 int i, err = 0;
2359
2360 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2361 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2362#ifdef I40E_FCOE
2363 i40e_fcoe_setup_ddp_resources(vsi);
2364#endif
2365 return err;
2366}
2367
2368/**
2369 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2370 * @vsi: ptr to the VSI
2371 *
2372 * Free all receive software resources
2373 **/
2374static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2375{
2376 int i;
2377
2378 if (!vsi->rx_rings)
2379 return;
2380
2381 for (i = 0; i < vsi->num_queue_pairs; i++)
2382 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2383 i40e_free_rx_resources(vsi->rx_rings[i]);
2384#ifdef I40E_FCOE
2385 i40e_fcoe_free_ddp_resources(vsi);
2386#endif
2387}
2388
2389/**
2390 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2391 * @ring: The Tx ring to configure
2392 *
2393 * This enables/disables XPS for a given Tx descriptor ring
2394 * based on the TCs enabled for the VSI that ring belongs to.
2395 **/
2396static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2397{
2398 struct i40e_vsi *vsi = ring->vsi;
2399 cpumask_var_t mask;
2400
2401 if (ring->q_vector && ring->netdev) {
2402 /* Single TC mode enable XPS */
2403 if (vsi->tc_config.numtc <= 1 &&
2404 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
2405 netif_set_xps_queue(ring->netdev,
2406 &ring->q_vector->affinity_mask,
2407 ring->queue_index);
2408 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2409 /* Disable XPS to allow selection based on TC */
2410 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2411 netif_set_xps_queue(ring->netdev, mask,
2412 ring->queue_index);
2413 free_cpumask_var(mask);
2414 }
2415 }
2416}
2417
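/* Editor's note: with a single TC the ring's queue is registered with
 * XPS using the vector's affinity_mask, so a CPU transmitting from
 * that mask prefers this queue. With multiple TCs an all-zero cpumask
 * is installed instead, effectively disabling XPS so that queue
 * selection can follow the TC-to-queue mapping.
 */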
2418/**
2419 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the ring state
2420 * @ring: The Tx ring to configure
2421 *
2422 * Configure the Tx descriptor ring in the HMC context.
2423 **/
2424static int i40e_configure_tx_ring(struct i40e_ring *ring)
2425{
2426 struct i40e_vsi *vsi = ring->vsi;
2427 u16 pf_q = vsi->base_queue + ring->queue_index;
2428 struct i40e_hw *hw = &vsi->back->hw;
2429 struct i40e_hmc_obj_txq tx_ctx;
2430 i40e_status err = 0;
2431 u32 qtx_ctl = 0;
2432
2433 /* some ATR related tx ring init */
2434 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2435 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2436 ring->atr_count = 0;
2437 } else {
2438 ring->atr_sample_rate = 0;
2439 }
2440
2441 /* configure XPS */
2442 i40e_config_xps_tx_ring(ring);
2443
2444 /* clear the context structure first */
2445 memset(&tx_ctx, 0, sizeof(tx_ctx));
2446
2447 tx_ctx.new_context = 1;
2448 tx_ctx.base = (ring->dma / 128);
2449 tx_ctx.qlen = ring->count;
2450 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2451 I40E_FLAG_FD_ATR_ENABLED));
2452#ifdef I40E_FCOE
2453 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2454#endif
2455 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2456 /* FDIR VSI tx ring can still use RS bit and writebacks */
2457 if (vsi->type != I40E_VSI_FDIR)
2458 tx_ctx.head_wb_ena = 1;
2459 tx_ctx.head_wb_addr = ring->dma +
2460 (ring->count * sizeof(struct i40e_tx_desc));
2461
2462 /* As part of VSI creation/update, FW allocates certain
2463 * Tx arbitration queue sets for each TC enabled for
2464 * the VSI. The FW returns the handles to these queue
2465 * sets as part of the response buffer to Add VSI,
2466 * Update VSI, etc. AQ commands. It is expected that
2467 * these queue set handles be associated with the Tx
2468 * queues by the driver as part of the TX queue context
2469 * initialization. This has to be done regardless of
2470 * DCB as by default everything is mapped to TC0.
2471 */
2472 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2473 tx_ctx.rdylist_act = 0;
2474
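	/* Worked example (editor's note): with DCB enabled for TC0 and
	 * TC1, the AQ response gives the VSI qs_handle[0] and
	 * qs_handle[1]; a ring with ring->dcb_tc == 1 gets
	 * rdylist = qs_handle[1]. With DCB off, every ring has
	 * dcb_tc == 0, so all rings share qs_handle[0].
	 */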
2475 /* clear the context in the HMC */
2476 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2477 if (err) {
2478 dev_info(&vsi->back->pdev->dev,
2479 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2480 ring->queue_index, pf_q, err);
2481 return -ENOMEM;
2482 }
2483
2484 /* set the context in the HMC */
2485 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2486 if (err) {
2487 dev_info(&vsi->back->pdev->dev,
2488 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2489 ring->queue_index, pf_q, err);
2490 return -ENOMEM;
2491 }
2492
2493 /* Now associate this queue with this PCI function */
2494 if (vsi->type == I40E_VSI_VMDQ2) {
2495 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2496 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2497 I40E_QTX_CTL_VFVM_INDX_MASK;
2498 } else {
2499 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2500 }
2501
2502 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2503 I40E_QTX_CTL_PF_INDX_MASK);
2504 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2505 i40e_flush(hw);
2506
2507 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2508
2509 /* cache tail off for easier writes later */
2510 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2511
2512 return 0;
2513}
2514
2515/**
2516 * i40e_configure_rx_ring - Configure a receive ring context
2517 * @ring: The Rx ring to configure
2518 *
2519 * Configure the Rx descriptor ring in the HMC context.
2520 **/
2521static int i40e_configure_rx_ring(struct i40e_ring *ring)
2522{
2523 struct i40e_vsi *vsi = ring->vsi;
2524 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2525 u16 pf_q = vsi->base_queue + ring->queue_index;
2526 struct i40e_hw *hw = &vsi->back->hw;
2527 struct i40e_hmc_obj_rxq rx_ctx;
2528 i40e_status err = 0;
2529
2530 ring->state = 0;
2531
2532 /* clear the context structure first */
2533 memset(&rx_ctx, 0, sizeof(rx_ctx));
2534
2535 ring->rx_buf_len = vsi->rx_buf_len;
2536 ring->rx_hdr_len = vsi->rx_hdr_len;
2537
2538 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2539 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2540
2541 rx_ctx.base = (ring->dma / 128);
2542 rx_ctx.qlen = ring->count;
2543
2544 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2545 set_ring_16byte_desc_enabled(ring);
2546 rx_ctx.dsize = 0;
2547 } else {
2548 rx_ctx.dsize = 1;
2549 }
2550
2551 rx_ctx.dtype = vsi->dtype;
2552 if (vsi->dtype) {
2553 set_ring_ps_enabled(ring);
2554 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2555 I40E_RX_SPLIT_IP |
2556 I40E_RX_SPLIT_TCP_UDP |
2557 I40E_RX_SPLIT_SCTP;
2558 } else {
2559 rx_ctx.hsplit_0 = 0;
2560 }
2561
2562 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2563 (chain_len * ring->rx_buf_len));
2564 if (hw->revision_id == 0)
2565 rx_ctx.lrxqthresh = 0;
2566 else
2567 rx_ctx.lrxqthresh = 2;
2568 rx_ctx.crcstrip = 1;
2569 rx_ctx.l2tsel = 1;
2570 rx_ctx.showiv = 1;
2571#ifdef I40E_FCOE
2572 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2573#endif
2574 /* set the prefena field to 1 because the manual says to */
2575 rx_ctx.prefena = 1;
2576
2577 /* clear the context in the HMC */
2578 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2579 if (err) {
2580 dev_info(&vsi->back->pdev->dev,
2581 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2582 ring->queue_index, pf_q, err);
2583 return -ENOMEM;
2584 }
2585
2586 /* set the context in the HMC */
2587 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2588 if (err) {
2589 dev_info(&vsi->back->pdev->dev,
2590 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2591 ring->queue_index, pf_q, err);
2592 return -ENOMEM;
2593 }
2594
2595 /* cache tail for quicker writes, and clear the reg before use */
2596 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2597 writel(0, ring->tail);
2598
2599 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2600
2601 return 0;
2602}
2603
2604/**
2605 * i40e_vsi_configure_tx - Configure the VSI for Tx
2606 * @vsi: VSI structure describing this set of rings and resources
2607 *
2608 * Configure the Tx VSI for operation.
2609 **/
2610static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2611{
2612 int err = 0;
2613 u16 i;
2614
2615 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2616 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2617
2618 return err;
2619}
2620
2621/**
2622 * i40e_vsi_configure_rx - Configure the VSI for Rx
2623 * @vsi: the VSI being configured
2624 *
2625 * Configure the Rx VSI for operation.
2626 **/
2627static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2628{
2629 int err = 0;
2630 u16 i;
2631
2632 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2633 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2634 + ETH_FCS_LEN + VLAN_HLEN;
2635 else
2636 vsi->max_frame = I40E_RXBUFFER_2048;
2637
2638 /* figure out correct receive buffer length */
2639 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2640 I40E_FLAG_RX_PS_ENABLED)) {
2641 case I40E_FLAG_RX_1BUF_ENABLED:
2642 vsi->rx_hdr_len = 0;
2643 vsi->rx_buf_len = vsi->max_frame;
2644 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2645 break;
2646 case I40E_FLAG_RX_PS_ENABLED:
2647 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2648 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2649 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2650 break;
2651 default:
2652 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2653 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2654 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2655 break;
2656 }
2657
2658#ifdef I40E_FCOE
2659 /* setup rx buffer for FCoE */
2660 if ((vsi->type == I40E_VSI_FCOE) &&
2661 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2662 vsi->rx_hdr_len = 0;
2663 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2664 vsi->max_frame = I40E_RXBUFFER_3072;
2665 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2666 }
2667
2668#endif /* I40E_FCOE */
2669 /* round up for the chip's needs */
2670 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2671 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2672 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2673 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2674
2675 /* set up individual rings */
2676 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2677 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2678
2679 return err;
2680}
2681
2682/**
2683 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2684 * @vsi: ptr to the VSI
2685 **/
2686static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2687{
2688 struct i40e_ring *tx_ring, *rx_ring;
2689 u16 qoffset, qcount;
2690 int i, n;
2691
2692 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2693 /* Reset the TC information */
2694 for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 rx_ring = vsi->rx_rings[i];
2696 tx_ring = vsi->tx_rings[i];
2697 rx_ring->dcb_tc = 0;
2698 tx_ring->dcb_tc = 0;
2699 }
2700 }
2701
2702 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2703 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2704 continue;
2705
2706 qoffset = vsi->tc_config.tc_info[n].qoffset;
2707 qcount = vsi->tc_config.tc_info[n].qcount;
2708 for (i = qoffset; i < (qoffset + qcount); i++) {
2709 rx_ring = vsi->rx_rings[i];
2710 tx_ring = vsi->tx_rings[i];
2711 rx_ring->dcb_tc = n;
2712 tx_ring->dcb_tc = n;
2713 }
2714 }
2715}
2716
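/* Worked example (editor's note): with two TCs configured as
 * tc_info[0] = { .qoffset = 0, .qcount = 4 } and
 * tc_info[1] = { .qoffset = 4, .qcount = 4 }, rings 0-3 get
 * dcb_tc = 0 and rings 4-7 get dcb_tc = 1, which in turn selects
 * the qs_handle used when each Tx ring context is written.
 */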
2717/**
2718 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2719 * @vsi: ptr to the VSI
2720 **/
2721static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2722{
2723 if (vsi->netdev)
2724 i40e_set_rx_mode(vsi->netdev);
2725}
2726
2727/**
2728 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2729 * @vsi: Pointer to the targeted VSI
2730 *
2731 * This function replays the hlist on the hw where all the SB Flow Director
2732 * filters were saved.
2733 **/
2734static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2735{
2736 struct i40e_fdir_filter *filter;
2737 struct i40e_pf *pf = vsi->back;
2738 struct hlist_node *node;
2739
2740 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2741 return;
2742
2743 hlist_for_each_entry_safe(filter, node,
2744 &pf->fdir_filter_list, fdir_node) {
2745 i40e_add_del_fdir(vsi, filter, true);
2746 }
2747}
2748
2749/**
2750 * i40e_vsi_configure - Set up the VSI for action
2751 * @vsi: the VSI being configured
2752 **/
2753static int i40e_vsi_configure(struct i40e_vsi *vsi)
2754{
2755 int err;
2756
2757 i40e_set_vsi_rx_mode(vsi);
2758 i40e_restore_vlan(vsi);
2759 i40e_vsi_config_dcb_rings(vsi);
2760 err = i40e_vsi_configure_tx(vsi);
2761 if (!err)
2762 err = i40e_vsi_configure_rx(vsi);
2763
2764 return err;
2765}
2766
2767/**
2768 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2769 * @vsi: the VSI being configured
2770 **/
2771static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2772{
2773 struct i40e_pf *pf = vsi->back;
2774 struct i40e_q_vector *q_vector;
2775 struct i40e_hw *hw = &pf->hw;
2776 u16 vector;
2777 int i, q;
2778 u32 val;
2779 u32 qp;
2780
2781 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2782 * and PFINT_LNKLSTn registers, e.g.:
2783 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2784 */
2785 qp = vsi->base_queue;
2786 vector = vsi->base_vector;
2787 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2788 q_vector = vsi->q_vectors[i];
2789 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2790 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2791 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2792 q_vector->rx.itr);
2793 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2794 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2795 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2796 q_vector->tx.itr);
2797
2798 /* Linked list for the queuepairs assigned to this vector */
2799 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2800 for (q = 0; q < q_vector->num_ringpairs; q++) {
2801 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2802 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2803 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2804 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2805 (I40E_QUEUE_TYPE_TX
2806 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2807
2808 wr32(hw, I40E_QINT_RQCTL(qp), val);
2809
2810 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2811 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2812 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2813 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2814 (I40E_QUEUE_TYPE_RX
2815 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2816
2817 /* Terminate the linked list */
2818 if (q == (q_vector->num_ringpairs - 1))
2819 val |= (I40E_QUEUE_END_OF_LIST
2820 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2821
2822 wr32(hw, I40E_QINT_TQCTL(qp), val);
2823 qp++;
2824 }
2825 }
2826
2827 i40e_flush(hw);
2828}
2829
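/* Editor's sketch of the register linkage above: for a vector owning
 * queue pairs 0 and 1 (MSI-X entry 1, so register index 0),
 * PFINT_LNKLSTN(0) points at rx-queue 0, RQCTL(0) chains to tx-queue
 * 0, TQCTL(0) chains to rx-queue 1, RQCTL(1) to tx-queue 1, and
 * TQCTL(1) carries I40E_QUEUE_END_OF_LIST to terminate the chain.
 */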
2830/**
2831 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2832 * @pf: board private structure
2833 **/
2834static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2835{
2836 struct i40e_hw *hw = &pf->hw;
2837 u32 val;
2838
2839 /* clear things first */
2840 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2841 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2842
2843 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2844 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2845 I40E_PFINT_ICR0_ENA_GRST_MASK |
2846 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2847 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2848 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2849 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2850 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2851
2852 if (pf->flags & I40E_FLAG_PTP)
2853 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2854
2855 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2856
2857 /* SW_ITR_IDX = 0, but don't change INTENA */
2858 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2859 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2860
2861 /* OTHER_ITR_IDX = 0 */
2862 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2863}
2864
2865/**
2866 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2867 * @vsi: the VSI being configured
2868 **/
2869static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2870{
2871 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2872 struct i40e_pf *pf = vsi->back;
2873 struct i40e_hw *hw = &pf->hw;
2874 u32 val;
2875
2876 /* set the ITR configuration */
2877 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2878 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2879 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2880 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2881 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2882 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2883
2884 i40e_enable_misc_int_causes(pf);
2885
2886 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2887 wr32(hw, I40E_PFINT_LNKLST0, 0);
2888
2889 /* Associate the queue pair to the vector and enable the queue int */
2890 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2891 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2892 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2893
2894 wr32(hw, I40E_QINT_RQCTL(0), val);
2895
2896 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2897 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2898 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2899
2900 wr32(hw, I40E_QINT_TQCTL(0), val);
2901 i40e_flush(hw);
2902}
2903
2904/**
2905 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2906 * @pf: board private structure
2907 **/
2908void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2909{
2910 struct i40e_hw *hw = &pf->hw;
2911
2912 wr32(hw, I40E_PFINT_DYN_CTL0,
2913 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2914 i40e_flush(hw);
2915}
2916
2917/**
2918 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2919 * @pf: board private structure
2920 **/
2921void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2922{
2923 struct i40e_hw *hw = &pf->hw;
2924 u32 val;
2925
2926 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2927 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2928 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2929
2930 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2931 i40e_flush(hw);
2932}
2933
2934/**
2935 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2936 * @vsi: pointer to a vsi
2937 * @vector: enable a particular Hw Interrupt vector
2938 **/
2939void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2940{
2941 struct i40e_pf *pf = vsi->back;
2942 struct i40e_hw *hw = &pf->hw;
2943 u32 val;
2944
2945 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2946 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2947 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2948 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2949 /* skip the flush */
2950}
2951
2952/**
2953 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2954 * @vsi: pointer to a vsi
2955 * @vector: disable a particular Hw Interrupt vector
2956 **/
2957void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2958{
2959 struct i40e_pf *pf = vsi->back;
2960 struct i40e_hw *hw = &pf->hw;
2961 u32 val;
2962
2963 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2964 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2965 i40e_flush(hw);
2966}
2967
2968/**
2969 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2970 * @irq: interrupt number
2971 * @data: pointer to a q_vector
2972 **/
2973static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2974{
2975 struct i40e_q_vector *q_vector = data;
2976
2977 if (!q_vector->tx.ring && !q_vector->rx.ring)
2978 return IRQ_HANDLED;
2979
2980 napi_schedule(&q_vector->napi);
2981
2982 return IRQ_HANDLED;
2983}
2984
2985/**
2986 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2987 * @vsi: the VSI being configured
2988 * @basename: name for the vector
2989 *
2990 * Allocates MSI-X vectors and requests interrupts from the kernel.
2991 **/
2992static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2993{
2994 int q_vectors = vsi->num_q_vectors;
2995 struct i40e_pf *pf = vsi->back;
2996 int base = vsi->base_vector;
2997 int rx_int_idx = 0;
2998 int tx_int_idx = 0;
2999 int vector, err;
3000
3001 for (vector = 0; vector < q_vectors; vector++) {
3002 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3003
3004 if (q_vector->tx.ring && q_vector->rx.ring) {
3005 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3006 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3007 tx_int_idx++;
3008 } else if (q_vector->rx.ring) {
3009 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3010 "%s-%s-%d", basename, "rx", rx_int_idx++);
3011 } else if (q_vector->tx.ring) {
3012 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3013 "%s-%s-%d", basename, "tx", tx_int_idx++);
3014 } else {
3015 /* skip this unused q_vector */
3016 continue;
3017 }
3018 err = request_irq(pf->msix_entries[base + vector].vector,
3019 vsi->irq_handler,
3020 0,
3021 q_vector->name,
3022 q_vector);
3023 if (err) {
3024 dev_info(&pf->pdev->dev,
3025 "%s: request_irq failed, error: %d\n",
3026 __func__, err);
3027 goto free_queue_irqs;
3028 }
3029 /* assign the mask for this irq */
3030 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3031 &q_vector->affinity_mask);
3032 }
3033
3034 vsi->irqs_ready = true;
3035 return 0;
3036
3037free_queue_irqs:
3038 while (vector) {
3039 vector--;
3040 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3041 NULL);
3042 free_irq(pf->msix_entries[base + vector].vector,
3043 &(vsi->q_vectors[vector]));
3044 }
3045 return err;
3046}
3047
3048/**
3049 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3050 * @vsi: the VSI being un-configured
3051 **/
3052static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3053{
3054 struct i40e_pf *pf = vsi->back;
3055 struct i40e_hw *hw = &pf->hw;
3056 int base = vsi->base_vector;
3057 int i;
3058
3059 for (i = 0; i < vsi->num_queue_pairs; i++) {
3060 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3061 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3062 }
3063
3064 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3065 for (i = vsi->base_vector;
3066 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3067 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3068
3069 i40e_flush(hw);
3070 for (i = 0; i < vsi->num_q_vectors; i++)
3071 synchronize_irq(pf->msix_entries[i + base].vector);
3072 } else {
3073 /* Legacy and MSI mode - this stops all interrupt handling */
3074 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3075 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3076 i40e_flush(hw);
3077 synchronize_irq(pf->pdev->irq);
3078 }
3079}
3080
3081/**
3082 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3083 * @vsi: the VSI being configured
3084 **/
3085static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3086{
3087 struct i40e_pf *pf = vsi->back;
3088 int i;
3089
3090 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3091 for (i = vsi->base_vector;
3092 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3093 i40e_irq_dynamic_enable(vsi, i);
3094 } else {
3095 i40e_irq_dynamic_enable_icr0(pf);
3096 }
3097
3098 i40e_flush(&pf->hw);
3099 return 0;
3100}
3101
3102/**
3103 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3104 * @pf: board private structure
3105 **/
3106static void i40e_stop_misc_vector(struct i40e_pf *pf)
3107{
3108 /* Disable ICR 0 */
3109 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3110 i40e_flush(&pf->hw);
3111}
3112
3113/**
3114 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3115 * @irq: interrupt number
3116 * @data: pointer to a q_vector
3117 *
3118 * This is the handler used for all MSI/Legacy interrupts, and deals
3119 * with both queue and non-queue interrupts. This is also used in
3120 * MSIX mode to handle the non-queue interrupts.
3121 **/
3122static irqreturn_t i40e_intr(int irq, void *data)
3123{
3124 struct i40e_pf *pf = (struct i40e_pf *)data;
3125 struct i40e_hw *hw = &pf->hw;
3126 irqreturn_t ret = IRQ_NONE;
3127 u32 icr0, icr0_remaining;
3128 u32 val, ena_mask;
3129
3130 icr0 = rd32(hw, I40E_PFINT_ICR0);
3131 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3132
3133 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3134 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3135 goto enable_intr;
3136
3137 /* if interrupt but no bits showing, must be SWINT */
3138 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3139 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3140 pf->sw_int_count++;
3141
3142 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3143 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3144
3145 /* temporarily disable queue cause for NAPI processing */
3146 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3147 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3148 wr32(hw, I40E_QINT_RQCTL(0), qval);
3149
3150 qval = rd32(hw, I40E_QINT_TQCTL(0));
3151 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3152 wr32(hw, I40E_QINT_TQCTL(0), qval);
3153
3154 if (!test_bit(__I40E_DOWN, &pf->state))
3155 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3156 }
3157
3158 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3159 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3160 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3161 }
3162
3163 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3164 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3165 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3166 }
3167
3168 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3169 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3170 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3171 }
3172
3173 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3174 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3175 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3176 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3177 val = rd32(hw, I40E_GLGEN_RSTAT);
3178 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3179 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3180 if (val == I40E_RESET_CORER) {
3181 pf->corer_count++;
3182 } else if (val == I40E_RESET_GLOBR) {
3183 pf->globr_count++;
3184 } else if (val == I40E_RESET_EMPR) {
3185 pf->empr_count++;
3186 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
3187 }
3188 }
3189
3190 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3191 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3192 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3193 }
3194
3195 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3196 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3197
3198 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3199 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3200 i40e_ptp_tx_hwtstamp(pf);
3201 }
3202 }
3203
3204 /* If a critical error is pending we have no choice but to reset the
3205 * device.
3206 * Report and mask out any remaining unexpected interrupts.
3207 */
3208 icr0_remaining = icr0 & ena_mask;
3209 if (icr0_remaining) {
3210 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3211 icr0_remaining);
3212 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3213 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3214 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3215 dev_info(&pf->pdev->dev, "device will be reset\n");
3216 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3217 i40e_service_event_schedule(pf);
3218 }
3219 ena_mask &= ~icr0_remaining;
3220 }
3221 ret = IRQ_HANDLED;
3222
3223enable_intr:
3224 /* re-enable interrupt causes */
3225 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3226 if (!test_bit(__I40E_DOWN, &pf->state)) {
3227 i40e_service_event_schedule(pf);
3228 i40e_irq_dynamic_enable_icr0(pf);
3229 }
3230
3231 return ret;
3232}
3233
3234/**
3235 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3236 * @tx_ring: tx ring to clean
3237 * @budget: how many cleans we're allowed
3238 *
3239 * Returns true if there's any budget left (i.e. the clean is finished)
3240 **/
3241static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3242{
3243 struct i40e_vsi *vsi = tx_ring->vsi;
3244 u16 i = tx_ring->next_to_clean;
3245 struct i40e_tx_buffer *tx_buf;
3246 struct i40e_tx_desc *tx_desc;
3247
3248 tx_buf = &tx_ring->tx_bi[i];
3249 tx_desc = I40E_TX_DESC(tx_ring, i);
3250 i -= tx_ring->count;
3251
3252 do {
3253 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3254
3255 /* if next_to_watch is not set then there is no work pending */
3256 if (!eop_desc)
3257 break;
3258
3259 /* prevent any other reads prior to eop_desc */
3260 read_barrier_depends();
3261
3262 /* if the descriptor isn't done, no work yet to do */
3263 if (!(eop_desc->cmd_type_offset_bsz &
3264 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3265 break;
3266
3267 /* clear next_to_watch to prevent false hangs */
3268 tx_buf->next_to_watch = NULL;
3269
3270 tx_desc->buffer_addr = 0;
3271 tx_desc->cmd_type_offset_bsz = 0;
3272 /* move past filter desc */
3273 tx_buf++;
3274 tx_desc++;
3275 i++;
3276 if (unlikely(!i)) {
3277 i -= tx_ring->count;
3278 tx_buf = tx_ring->tx_bi;
3279 tx_desc = I40E_TX_DESC(tx_ring, 0);
3280 }
3281 /* unmap skb header data */
3282 dma_unmap_single(tx_ring->dev,
3283 dma_unmap_addr(tx_buf, dma),
3284 dma_unmap_len(tx_buf, len),
3285 DMA_TO_DEVICE);
3286 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3287 kfree(tx_buf->raw_buf);
3288
3289 tx_buf->raw_buf = NULL;
3290 tx_buf->tx_flags = 0;
3291 tx_buf->next_to_watch = NULL;
3292 dma_unmap_len_set(tx_buf, len, 0);
3293 tx_desc->buffer_addr = 0;
3294 tx_desc->cmd_type_offset_bsz = 0;
3295
3296 /* move us past the eop_desc for start of next FD desc */
3297 tx_buf++;
3298 tx_desc++;
3299 i++;
3300 if (unlikely(!i)) {
3301 i -= tx_ring->count;
3302 tx_buf = tx_ring->tx_bi;
3303 tx_desc = I40E_TX_DESC(tx_ring, 0);
3304 }
3305
3306 /* update budget accounting */
3307 budget--;
3308 } while (likely(budget));
3309
3310 i += tx_ring->count;
3311 tx_ring->next_to_clean = i;
3312
3313 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3314 i40e_irq_dynamic_enable(vsi,
3315 tx_ring->q_vector->v_idx + vsi->base_vector);
3316 }
3317 return budget > 0;
3318}
3319
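/* Editor's note on the index arithmetic above: i is biased by
 * subtracting tx_ring->count up front, so the hot loop can detect
 * wraparound with "if (!i) i -= count" instead of comparing against
 * count each time. Example: count = 512, next_to_clean = 510 gives
 * i = -2; after two descriptors i reaches 0 and is rewound to -512,
 * and the final "i += count" restores a 0..511 ring index.
 */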
3320/**
3321 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3322 * @irq: interrupt number
3323 * @data: pointer to a q_vector
3324 **/
3325static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3326{
3327 struct i40e_q_vector *q_vector = data;
3328 struct i40e_vsi *vsi;
3329
3330 if (!q_vector->tx.ring)
3331 return IRQ_HANDLED;
3332
3333 vsi = q_vector->tx.ring->vsi;
3334 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3335
3336 return IRQ_HANDLED;
3337}
3338
3339/**
3340 * map_vector_to_qp - Assigns the queue pair to the vector
3341 * @vsi: the VSI being configured
3342 * @v_idx: vector index
3343 * @qp_idx: queue pair index
3344 **/
3345static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3346{
3347 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3348 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3349 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3350
3351 tx_ring->q_vector = q_vector;
3352 tx_ring->next = q_vector->tx.ring;
3353 q_vector->tx.ring = tx_ring;
3354 q_vector->tx.count++;
3355
3356 rx_ring->q_vector = q_vector;
3357 rx_ring->next = q_vector->rx.ring;
3358 q_vector->rx.ring = rx_ring;
3359 q_vector->rx.count++;
3360}
3361
3362/**
3363 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3364 * @vsi: the VSI being configured
3365 *
3366 * This function maps descriptor rings to the queue-specific vectors
3367 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3368 * one vector per queue pair, but on a constrained vector budget, we
3369 * group the queue pairs as "efficiently" as possible.
3370 **/
3371static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3372{
3373 int qp_remaining = vsi->num_queue_pairs;
3374 int q_vectors = vsi->num_q_vectors;
cd0b6fa6 3375 int num_ringpairs;
41c445ff
JB
3376 int v_start = 0;
3377 int qp_idx = 0;
3378
3379 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3380 * group them so there are multiple queues per vector.
3381 * It is also important to go through all the vectors available to be
3382 * sure that if we don't use all the vectors, the remaining vectors
3383 * are cleared. This is especially important when decreasing the
3384 * number of queues in use.
3385 */
3386 for (; v_start < q_vectors; v_start++) {
3387 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3388
3389 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3390
3391 q_vector->num_ringpairs = num_ringpairs;
3392
3393 q_vector->rx.count = 0;
3394 q_vector->tx.count = 0;
3395 q_vector->rx.ring = NULL;
3396 q_vector->tx.ring = NULL;
3397
3398 while (num_ringpairs--) {
3399 map_vector_to_qp(vsi, v_start, qp_idx);
3400 qp_idx++;
3401 qp_remaining--;
3402 }
3403 }
3404}
3405
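/* Worked example (editor's note): 10 queue pairs on 4 vectors gives
 * DIV_ROUND_UP(10, 4) = 3 ringpairs for vector 0, then
 * DIV_ROUND_UP(7, 3) = 3, DIV_ROUND_UP(4, 2) = 2 and
 * DIV_ROUND_UP(2, 1) = 2, i.e. a 3/3/2/2 split with no queue pair
 * left unmapped and no vector left stale.
 */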
3406/**
3407 * i40e_vsi_request_irq - Request IRQ from the OS
3408 * @vsi: the VSI being configured
3409 * @basename: name for the vector
3410 **/
3411static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3412{
3413 struct i40e_pf *pf = vsi->back;
3414 int err;
3415
3416 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3417 err = i40e_vsi_request_irq_msix(vsi, basename);
3418 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3419 err = request_irq(pf->pdev->irq, i40e_intr, 0,
b294ac70 3420 pf->int_name, pf);
41c445ff
JB
3421 else
3422 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3423 pf->int_name, pf);
3424
3425 if (err)
3426 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3427
3428 return err;
3429}
3430
3431#ifdef CONFIG_NET_POLL_CONTROLLER
3432/**
3433 * i40e_netpoll - A Polling 'interrupt' handler
3434 * @netdev: network interface device structure
3435 *
3436 * This is used by netconsole to send skbs without having to re-enable
3437 * interrupts. It's not called while the normal interrupt routine is executing.
3438 **/
3439#ifdef I40E_FCOE
3440void i40e_netpoll(struct net_device *netdev)
3441#else
3442static void i40e_netpoll(struct net_device *netdev)
3443#endif
3444{
3445 struct i40e_netdev_priv *np = netdev_priv(netdev);
3446 struct i40e_vsi *vsi = np->vsi;
3447 struct i40e_pf *pf = vsi->back;
3448 int i;
3449
3450 /* if interface is down do nothing */
3451 if (test_bit(__I40E_DOWN, &vsi->state))
3452 return;
3453
3454 pf->flags |= I40E_FLAG_IN_NETPOLL;
3455 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3456 for (i = 0; i < vsi->num_q_vectors; i++)
3457 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3458 } else {
3459 i40e_intr(pf->pdev->irq, netdev);
3460 }
3461 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3462}
3463#endif
3464
3465/**
3466 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3467 * @pf: the PF being configured
3468 * @pf_q: the PF queue
3469 * @enable: enable or disable state of the queue
3470 *
3471 * This routine will wait for the given Tx queue of the PF to reach the
3472 * enabled or disabled state.
3473 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3474 * multiple retries; else will return 0 in case of success.
3475 **/
3476static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3477{
3478 int i;
3479 u32 tx_reg;
3480
3481 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3482 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3483 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3484 break;
3485
3486 usleep_range(10, 20);
3487 }
3488 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3489 return -ETIMEDOUT;
3490
3491 return 0;
3492}
3493
41c445ff
JB
3494/**
3495 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3496 * @vsi: the VSI being configured
3497 * @enable: start or stop the rings
3498 **/
3499static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3500{
3501 struct i40e_pf *pf = vsi->back;
3502 struct i40e_hw *hw = &pf->hw;
23527308 3503 int i, j, pf_q, ret = 0;
41c445ff
JB
3504 u32 tx_reg;
3505
3506 pf_q = vsi->base_queue;
3507 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
351499ab
MJ
3508
3509 /* warn the TX unit of coming changes */
3510 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3511 if (!enable)
f98a2006 3512 usleep_range(10, 20);
351499ab 3513
6c5ef620 3514 for (j = 0; j < 50; j++) {
41c445ff 3515 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
6c5ef620
MW
3516 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3517 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3518 break;
3519 usleep_range(1000, 2000);
3520 }
fda972f6 3521 /* Skip if the queue is already in the requested state */
7c122007 3522 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
fda972f6 3523 continue;
41c445ff
JB
3524
3525 /* turn on/off the queue */
c5c9eb9e
SN
3526 if (enable) {
3527 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
6c5ef620 3528 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3529 } else {
41c445ff 3530 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3531 }
41c445ff
JB
3532
3533 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
69129dc3
NP
3534 /* No waiting for the Tx queue to disable */
3535 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3536 continue;
41c445ff
JB
3537
3538 /* wait for the change to finish */
23527308
NP
3539 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3540 if (ret) {
3541 dev_info(&pf->pdev->dev,
3542 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3543 __func__, vsi->seid, pf_q,
3544 (enable ? "en" : "dis"));
3545 break;
41c445ff
JB
3546 }
3547 }
3548
7134f9ce
JB
3549 if (hw->revision_id == 0)
3550 mdelay(50);
23527308
NP
3551 return ret;
3552}
3553
3554/**
3555 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3556 * @pf: the PF being configured
3557 * @pf_q: the PF queue
3558 * @enable: enable or disable state of the queue
3559 *
3560 * This routine will wait for the given Rx queue of the PF to reach the
3561 * enabled or disabled state.
3562 * Returns -ETIMEDOUT if the queue fails to reach the requested state
3563 * after multiple retries; otherwise returns 0 on success.
3564 **/
3565static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3566{
3567 int i;
3568 u32 rx_reg;
3569
3570 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3571 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3572 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3573 break;
3574
f98a2006 3575 usleep_range(10, 20);
23527308
NP
3576 }
3577 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3578 return -ETIMEDOUT;
7134f9ce 3579
41c445ff
JB
3580 return 0;
3581}
3582
3583/**
3584 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3585 * @vsi: the VSI being configured
3586 * @enable: start or stop the rings
3587 **/
3588static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3589{
3590 struct i40e_pf *pf = vsi->back;
3591 struct i40e_hw *hw = &pf->hw;
23527308 3592 int i, j, pf_q, ret = 0;
41c445ff
JB
3593 u32 rx_reg;
3594
3595 pf_q = vsi->base_queue;
3596 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
6c5ef620 3597 for (j = 0; j < 50; j++) {
41c445ff 3598 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
6c5ef620
MW
3599 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3600 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3601 break;
3602 usleep_range(1000, 2000);
3603 }
41c445ff 3604
7c122007
CS
3605 /* Skip if the queue is already in the requested state */
3606 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3607 continue;
41c445ff
JB
3608
3609 /* turn on/off the queue */
3610 if (enable)
6c5ef620 3611 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff 3612 else
6c5ef620 3613 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff
JB
3614 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3615
3616 /* wait for the change to finish */
23527308
NP
3617 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3618 if (ret) {
3619 dev_info(&pf->pdev->dev,
3620 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3621 __func__, vsi->seid, pf_q,
3622 (enable ? "en" : "dis"));
3623 break;
41c445ff
JB
3624 }
3625 }
3626
23527308 3627 return ret;
41c445ff
JB
3628}
3629
3630/**
3631 * i40e_vsi_control_rings - Start or stop a VSI's rings
3632 * @vsi: the VSI being configured
3633 * @request: start or stop the rings
3634 **/
fc18eaa0 3635int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
41c445ff 3636{
3b867b28 3637 int ret = 0;
41c445ff
JB
3638
3639 /* do rx first for enable and last for disable */
3640 if (request) {
3641 ret = i40e_vsi_control_rx(vsi, request);
3642 if (ret)
3643 return ret;
3644 ret = i40e_vsi_control_tx(vsi, request);
3645 } else {
3b867b28
ASJ
3646 /* Ignore return value, we need to shut down whatever we can */
3647 i40e_vsi_control_tx(vsi, request);
3648 i40e_vsi_control_rx(vsi, request);
41c445ff
JB
3649 }
3650
3651 return ret;
3652}
3653
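/* Usage sketch (this mirrors what i40e_up_complete() and i40e_down()
 * below already do; it is shown only to make the ordering explicit):
 * on enable, Rx comes up before Tx so nothing transmits against a
 * dead receive side; on disable, errors are ignored and both
 * directions are stopped best-effort.
 */
#if 0	/* illustration only */
static int i40e_example_restart_rings(struct i40e_vsi *vsi)
{
	i40e_vsi_control_rings(vsi, false);	  /* best-effort stop */
	return i40e_vsi_control_rings(vsi, true); /* Rx first, then Tx */
}
#endif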
3654/**
3655 * i40e_vsi_free_irq - Free the irq association with the OS
3656 * @vsi: the VSI being configured
3657 **/
3658static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3659{
3660 struct i40e_pf *pf = vsi->back;
3661 struct i40e_hw *hw = &pf->hw;
3662 int base = vsi->base_vector;
3663 u32 val, qp;
3664 int i;
3665
3666 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3667 if (!vsi->q_vectors)
3668 return;
3669
63741846
SN
3670 if (!vsi->irqs_ready)
3671 return;
3672
3673 vsi->irqs_ready = false;
41c445ff
JB
3674 for (i = 0; i < vsi->num_q_vectors; i++) {
3675 u16 vector = i + base;
3676
3677 /* free only the irqs that were actually requested */
78681b1f
SN
3678 if (!vsi->q_vectors[i] ||
3679 !vsi->q_vectors[i]->num_ringpairs)
41c445ff
JB
3680 continue;
3681
3682 /* clear the affinity_mask in the IRQ descriptor */
3683 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3684 NULL);
3685 free_irq(pf->msix_entries[vector].vector,
493fb300 3686 vsi->q_vectors[i]);
41c445ff
JB
3687
3688 /* Tear down the interrupt queue link list
3689 *
3690 * We know that they come in pairs and always
3691 * the Rx first, then the Tx. To clear the
3692 * link list, stick the EOL value into the
3693 * next_q field of the registers.
3694 */
3695 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3696 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3697 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3698 val |= I40E_QUEUE_END_OF_LIST
3699 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3700 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3701
3702 while (qp != I40E_QUEUE_END_OF_LIST) {
3703 u32 next;
3704
3705 val = rd32(hw, I40E_QINT_RQCTL(qp));
3706
3707 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3708 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3709 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3710 I40E_QINT_RQCTL_INTEVENT_MASK);
3711
3712 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3713 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3714
3715 wr32(hw, I40E_QINT_RQCTL(qp), val);
3716
3717 val = rd32(hw, I40E_QINT_TQCTL(qp));
3718
3719 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3720 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3721
3722 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3723 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3724 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3725 I40E_QINT_TQCTL_INTEVENT_MASK);
3726
3727 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3728 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3729
3730 wr32(hw, I40E_QINT_TQCTL(qp), val);
3731 qp = next;
3732 }
3733 }
3734 } else {
3735 free_irq(pf->pdev->irq, pf);
3736
3737 val = rd32(hw, I40E_PFINT_LNKLST0);
3738 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3739 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3740 val |= I40E_QUEUE_END_OF_LIST
3741 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3742 wr32(hw, I40E_PFINT_LNKLST0, val);
3743
3744 val = rd32(hw, I40E_QINT_RQCTL(qp));
3745 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3746 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3747 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3748 I40E_QINT_RQCTL_INTEVENT_MASK);
3749
3750 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3751 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3752
3753 wr32(hw, I40E_QINT_RQCTL(qp), val);
3754
3755 val = rd32(hw, I40E_QINT_TQCTL(qp));
3756
3757 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3758 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3759 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3760 I40E_QINT_TQCTL_INTEVENT_MASK);
3761
3762 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3763 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3764
3765 wr32(hw, I40E_QINT_TQCTL(qp), val);
3766 }
3767}
3768
493fb300
AD
3769/**
3770 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3771 * @vsi: the VSI being configured
3772 * @v_idx: Index of vector to be freed
3773 *
3774 * This function frees the memory allocated to the q_vector. In addition if
3775 * NAPI is enabled it will delete any references to the NAPI struct prior
3776 * to freeing the q_vector.
3777 **/
3778static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3779{
3780 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
cd0b6fa6 3781 struct i40e_ring *ring;
493fb300
AD
3782
3783 if (!q_vector)
3784 return;
3785
3786 /* disassociate q_vector from rings */
cd0b6fa6
AD
3787 i40e_for_each_ring(ring, q_vector->tx)
3788 ring->q_vector = NULL;
3789
3790 i40e_for_each_ring(ring, q_vector->rx)
3791 ring->q_vector = NULL;
493fb300
AD
3792
3793 /* only VSI w/ an associated netdev is set up w/ NAPI */
3794 if (vsi->netdev)
3795 netif_napi_del(&q_vector->napi);
3796
3797 vsi->q_vectors[v_idx] = NULL;
3798
3799 kfree_rcu(q_vector, rcu);
3800}
3801
41c445ff
JB
3802/**
3803 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3804 * @vsi: the VSI being un-configured
3805 *
3806 * This frees the memory allocated to the q_vectors and
3807 * deletes references to the NAPI struct.
3808 **/
3809static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3810{
3811 int v_idx;
3812
493fb300
AD
3813 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3814 i40e_free_q_vector(vsi, v_idx);
41c445ff
JB
3815}
3816
3817/**
3818 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3819 * @pf: board private structure
3820 **/
3821static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3822{
3823 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3824 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3825 pci_disable_msix(pf->pdev);
3826 kfree(pf->msix_entries);
3827 pf->msix_entries = NULL;
3828 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3829 pci_disable_msi(pf->pdev);
3830 }
3831 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3832}
3833
3834/**
3835 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3836 * @pf: board private structure
3837 *
3838 * We go through and clear interrupt specific resources and reset the structure
3839 * to pre-load conditions
3840 **/
3841static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3842{
3843 int i;
3844
3845 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
505682cd 3846 for (i = 0; i < pf->num_alloc_vsi; i++)
41c445ff
JB
3847 if (pf->vsi[i])
3848 i40e_vsi_free_q_vectors(pf->vsi[i]);
3849 i40e_reset_interrupt_capability(pf);
3850}
3851
3852/**
3853 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3854 * @vsi: the VSI being configured
3855 **/
3856static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3857{
3858 int q_idx;
3859
3860 if (!vsi->netdev)
3861 return;
3862
3863 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3864 napi_enable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3865}
3866
3867/**
3868 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3869 * @vsi: the VSI being configured
3870 **/
3871static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3872{
3873 int q_idx;
3874
3875 if (!vsi->netdev)
3876 return;
3877
3878 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3879 napi_disable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3880}
3881
90ef8d47
SN
3882/**
3883 * i40e_vsi_close - Shut down a VSI
3884 * @vsi: the VSI to be quelled
3885 **/
3886static void i40e_vsi_close(struct i40e_vsi *vsi)
3887{
3888 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3889 i40e_down(vsi);
3890 i40e_vsi_free_irq(vsi);
3891 i40e_vsi_free_tx_resources(vsi);
3892 i40e_vsi_free_rx_resources(vsi);
3893}
3894
41c445ff
JB
3895/**
3896 * i40e_quiesce_vsi - Pause a given VSI
3897 * @vsi: the VSI being paused
3898 **/
3899static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3900{
3901 if (test_bit(__I40E_DOWN, &vsi->state))
3902 return;
3903
d341b7a5
NP
3904 /* No need to disable FCoE VSI when Tx suspended */
3905 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3906 vsi->type == I40E_VSI_FCOE) {
3907 dev_dbg(&vsi->back->pdev->dev,
3908 "%s: VSI seid %d skipping FCoE VSI disable\n",
3909 __func__, vsi->seid);
3910 return;
3911 }
3912
41c445ff
JB
3913 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3914 if (vsi->netdev && netif_running(vsi->netdev)) {
3915 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3916 } else {
90ef8d47 3917 i40e_vsi_close(vsi);
41c445ff
JB
3918 }
3919}
3920
3921/**
3922 * i40e_unquiesce_vsi - Resume a given VSI
3923 * @vsi: the VSI being resumed
3924 **/
3925static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3926{
3927 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3928 return;
3929
3930 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3931 if (vsi->netdev && netif_running(vsi->netdev))
3932 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3933 else
8276f757 3934 i40e_vsi_open(vsi); /* this clears the DOWN bit */
41c445ff
JB
3935}
3936
3937/**
3938 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3939 * @pf: the PF
3940 **/
3941static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3942{
3943 int v;
3944
505682cd 3945 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
3946 if (pf->vsi[v])
3947 i40e_quiesce_vsi(pf->vsi[v]);
3948 }
3949}
3950
3951/**
3952 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3953 * @pf: the PF
3954 **/
3955static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3956{
3957 int v;
3958
505682cd 3959 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
3960 if (pf->vsi[v])
3961 i40e_unquiesce_vsi(pf->vsi[v]);
3962 }
3963}
3964
69129dc3
NP
3965#ifdef CONFIG_I40E_DCB
3966/**
3967 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
3968 * @vsi: the VSI being configured
3969 *
3970 * This function waits for the given VSI's Tx queues to be disabled.
3971 **/
3972static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
3973{
3974 struct i40e_pf *pf = vsi->back;
3975 int i, pf_q, ret;
3976
3977 pf_q = vsi->base_queue;
3978 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3979 /* Check and wait for the disable status of the queue */
3980 ret = i40e_pf_txq_wait(pf, pf_q, false);
3981 if (ret) {
3982 dev_info(&pf->pdev->dev,
3983 "%s: VSI seid %d Tx ring %d disable timeout\n",
3984 __func__, vsi->seid, pf_q);
3985 return ret;
3986 }
3987 }
3988
3989 return 0;
3990}
3991
3992/**
3993 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
3994 * @pf: the PF
3995 *
3996 * This function waits for the Tx queues to be in disabled state for all the
3997 * VSIs that are managed by this PF.
3998 **/
3999static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4000{
4001 int v, ret = 0;
4002
4003 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
d341b7a5
NP
4004 /* No need to wait for FCoE VSI queues */
4005 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
69129dc3
NP
4006 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4007 if (ret)
4008 break;
4009 }
4010 }
4011
4012 return ret;
4013}
4014
4015#endif
63d7e5a4
NP
4016/**
4017 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4018 * @pf: pointer to the PF
4019 *
4020 * Get TC map for iSCSI PF type that will include iSCSI TC
4021 * and LAN TC.
4022 **/
4023static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4024{
4025 struct i40e_dcb_app_priority_table app;
4026 struct i40e_hw *hw = &pf->hw;
4027 u8 enabled_tc = 1; /* TC0 is always enabled */
4028 u8 tc, i;
4029 /* Get the iSCSI APP TLV */
4030 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4031
4032 for (i = 0; i < dcbcfg->numapps; i++) {
4033 app = dcbcfg->app[i];
4034 if (app.selector == I40E_APP_SEL_TCPIP &&
4035 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4036 tc = dcbcfg->etscfg.prioritytable[app.priority];
4037 enabled_tc |= (1 << tc);
4038 break;
4039 }
4040 }
4041
4042 return enabled_tc;
4043}
4044
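/* Worked example (hypothetical TLV contents): if the iSCSI APP TLV
 * carries priority 4 and etscfg.prioritytable[4] == 2, the loop above
 * computes enabled_tc = 0x1 | (1 << 2) = 0x5, i.e. TC0 plus the
 * traffic class mapped to the iSCSI priority.
 */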
41c445ff
JB
4045/**
4046 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4047 * @dcbcfg: the corresponding DCBx configuration structure
4048 *
4049 * Return the number of TCs from given DCBx configuration
4050 **/
4051static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4052{
078b5876
JB
4053 u8 num_tc = 0;
4054 int i;
41c445ff
JB
4055
4056 /* Scan the ETS Config Priority Table to find
4057 * traffic class enabled for a given priority
4058 * and use the traffic class index to get the
4059 * number of traffic classes enabled
4060 */
4061 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4062 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4063 num_tc = dcbcfg->etscfg.prioritytable[i];
4064 }
4065
4066 /* Traffic class index starts from zero so
4067 * increment to return the actual count
4068 */
078b5876 4069 return num_tc + 1;
41c445ff
JB
4070}
4071
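/* Worked example (hypothetical table): with
 * etscfg.prioritytable = { 0, 0, 1, 2, 0, 0, 0, 0 } the highest TC
 * index seen is 2, so this returns 3; i40e_dcb_get_enabled_tc()
 * below would then report the bitmap 0x7 (TC0..TC2).
 */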
4072/**
4073 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4074 * @dcbcfg: the corresponding DCBx configuration structure
4075 *
4076 * Query the current DCB configuration and return a bitmap of the
4077 * traffic classes enabled in the given DCBX config
4078 **/
4079static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4080{
4081 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4082 u8 enabled_tc = 1;
4083 u8 i;
4084
4085 for (i = 0; i < num_tc; i++)
4086 enabled_tc |= 1 << i;
4087
4088 return enabled_tc;
4089}
4090
4091/**
4092 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for PF
4093 * @pf: PF being queried
4094 *
4095 * Return number of traffic classes enabled for the given PF
4096 **/
4097static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4098{
4099 struct i40e_hw *hw = &pf->hw;
4100 u8 i, enabled_tc;
4101 u8 num_tc = 0;
4102 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4103
4104 /* If DCB is not enabled then always in single TC */
4105 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4106 return 1;
4107
63d7e5a4
NP
4108 /* SFP mode will be enabled for all TCs on port */
4109 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4110 return i40e_dcb_get_num_tc(dcbcfg);
4111
41c445ff 4112 /* In MFP mode, return the count of enabled TCs for this PF */
63d7e5a4
NP
4113 if (pf->hw.func_caps.iscsi)
4114 enabled_tc = i40e_get_iscsi_tc_map(pf);
4115 else
41c445ff 4116 enabled_tc = pf->hw.func_caps.enabled_tcmap;
41c445ff 4117
63d7e5a4
NP
4118 /* At least have TC0 */
4119 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4120 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4121 if (enabled_tc & (1 << i))
4122 num_tc++;
4123 }
4124 return num_tc;
41c445ff
JB
4125}
4126
4127/**
4128 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4129 * @pf: PF being queried
4130 *
4131 * Return a bitmap for first enabled traffic class for this PF.
4132 **/
4133static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4134{
4135 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4136 u8 i = 0;
4137
4138 if (!enabled_tc)
4139 return 0x1; /* TC0 */
4140
4141 /* Find the first enabled TC */
4142 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4143 if (enabled_tc & (1 << i))
4144 break;
4145 }
4146
4147 return 1 << i;
4148}
4149
4150/**
4151 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4152 * @pf: PF being queried
4153 *
4154 * Return a bitmap for enabled traffic classes for this PF.
4155 **/
4156static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4157{
4158 /* If DCB is not enabled for this PF then just return default TC */
4159 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4160 return i40e_pf_get_default_tc(pf);
4161
41c445ff 4162 /* SFP mode we want PF to be enabled for all TCs */
63d7e5a4
NP
4163 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4164 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4165
4166 /* MFP enabled and iSCSI PF type */
4167 if (pf->hw.func_caps.iscsi)
4168 return i40e_get_iscsi_tc_map(pf);
4169 else
4170 return pf->hw.func_caps.enabled_tcmap;
41c445ff
JB
4171}
4172
4173/**
4174 * i40e_vsi_get_bw_info - Query VSI BW Information
4175 * @vsi: the VSI being queried
4176 *
4177 * Returns 0 on success, negative value on failure
4178 **/
4179static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4180{
4181 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4182 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4183 struct i40e_pf *pf = vsi->back;
4184 struct i40e_hw *hw = &pf->hw;
dcae29be 4185 i40e_status aq_ret;
41c445ff 4186 u32 tc_bw_max;
41c445ff
JB
4187 int i;
4188
4189 /* Get the VSI level BW configuration */
dcae29be
JB
4190 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4191 if (aq_ret) {
41c445ff
JB
4192 dev_info(&pf->pdev->dev,
4193 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
dcae29be
JB
4194 aq_ret, pf->hw.aq.asq_last_status);
4195 return -EINVAL;
41c445ff
JB
4196 }
4197
4198 /* Get the VSI level BW configuration per TC */
dcae29be 4199 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
6838b535 4200 NULL);
dcae29be 4201 if (aq_ret) {
41c445ff
JB
4202 dev_info(&pf->pdev->dev,
4203 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
dcae29be
JB
4204 aq_ret, pf->hw.aq.asq_last_status);
4205 return -EINVAL;
41c445ff
JB
4206 }
4207
4208 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4209 dev_info(&pf->pdev->dev,
4210 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4211 bw_config.tc_valid_bits,
4212 bw_ets_config.tc_valid_bits);
4213 /* Still continuing */
4214 }
4215
4216 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4217 vsi->bw_max_quanta = bw_config.max_bw;
4218 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4219 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4220 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4221 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4222 vsi->bw_ets_limit_credits[i] =
4223 le16_to_cpu(bw_ets_config.credits[i]);
4224 /* 3 bits out of 4 for each TC */
4225 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4226 }
078b5876 4227
dcae29be 4228 return 0;
41c445ff
JB
4229}
4230
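/* Worked example (hypothetical AQ reply) for the nibble unpacking
 * above: tc_bw_max words 0x3322 and 0x0000 combine to 0x00003322,
 * so (tc_bw_max >> (i * 4)) & 0x7 yields max BW quanta of 2, 2, 3, 3
 * for TC0..TC3 and 0 for the remaining TCs.
 */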
4231/**
4232 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4233 * @vsi: the VSI being configured
4234 * @enabled_tc: TC bitmap
4235 * @bw_credits: BW shared credits per TC
4236 *
4237 * Returns 0 on success, negative value on failure
4238 **/
dcae29be 4239static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
41c445ff
JB
4240 u8 *bw_share)
4241{
4242 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
dcae29be
JB
4243 i40e_status aq_ret;
4244 int i;
41c445ff
JB
4245
4246 bw_data.tc_valid_bits = enabled_tc;
4247 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4248 bw_data.tc_bw_credits[i] = bw_share[i];
4249
dcae29be
JB
4250 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4251 NULL);
4252 if (aq_ret) {
41c445ff 4253 dev_info(&vsi->back->pdev->dev,
69bfb110
JB
4254 "AQ command Config VSI BW allocation per TC failed = %d\n",
4255 vsi->back->hw.aq.asq_last_status);
dcae29be 4256 return -EINVAL;
41c445ff
JB
4257 }
4258
4259 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4260 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4261
dcae29be 4262 return 0;
41c445ff
JB
4263}
4264
4265/**
4266 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4267 * @vsi: the VSI being configured
4268 * @enabled_tc: TC map to be enabled
4269 *
4270 **/
4271static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4272{
4273 struct net_device *netdev = vsi->netdev;
4274 struct i40e_pf *pf = vsi->back;
4275 struct i40e_hw *hw = &pf->hw;
4276 u8 netdev_tc = 0;
4277 int i;
4278 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4279
4280 if (!netdev)
4281 return;
4282
4283 if (!enabled_tc) {
4284 netdev_reset_tc(netdev);
4285 return;
4286 }
4287
4288 /* Set up actual enabled TCs on the VSI */
4289 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4290 return;
4291
4292 /* set per TC queues for the VSI */
4293 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4294 /* Only set TC queues for enabled tcs
4295 *
4296 * e.g. For a VSI that has TC0 and TC3 enabled the
4297 * enabled_tc bitmap would be 0x00001001; the driver
4298 * will set the numtc for netdev as 2 that will be
4299 * referenced by the netdev layer as TC 0 and 1.
4300 */
4301 if (vsi->tc_config.enabled_tc & (1 << i))
4302 netdev_set_tc_queue(netdev,
4303 vsi->tc_config.tc_info[i].netdev_tc,
4304 vsi->tc_config.tc_info[i].qcount,
4305 vsi->tc_config.tc_info[i].qoffset);
4306 }
4307
4308 /* Assign UP2TC map for the VSI */
4309 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4310 /* Get the actual TC# for the UP */
4311 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4312 /* Get the mapped netdev TC# for the UP */
4313 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4314 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4315 }
4316}
4317
4318/**
4319 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4320 * @vsi: the VSI being configured
4321 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4322 **/
4323static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4324 struct i40e_vsi_context *ctxt)
4325{
4326 /* copy just the sections touched, not the entire info,
4327 * since not all sections are valid as returned by
4328 * update vsi params
4329 */
4330 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4331 memcpy(&vsi->info.queue_mapping,
4332 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4333 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4334 sizeof(vsi->info.tc_mapping));
4335}
4336
4337/**
4338 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4339 * @vsi: VSI to be configured
4340 * @enabled_tc: TC bitmap
4341 *
4342 * This configures a particular VSI for TCs that are mapped to the
4343 * given TC bitmap. It uses default bandwidth share for TCs across
4344 * VSIs to configure TC for a particular VSI.
4345 *
4346 * NOTE:
4347 * It is expected that the VSI queues have been quiesced before calling
4348 * this function.
4349 **/
4350static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4351{
4352 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4353 struct i40e_vsi_context ctxt;
4354 int ret = 0;
4355 int i;
4356
4357 /* Check if enabled_tc is same as existing or new TCs */
4358 if (vsi->tc_config.enabled_tc == enabled_tc)
4359 return ret;
4360
4361 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4362 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4363 if (enabled_tc & (1 << i))
4364 bw_share[i] = 1;
4365 }
4366
4367 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4368 if (ret) {
4369 dev_info(&vsi->back->pdev->dev,
4370 "Failed configuring TC map %d for VSI %d\n",
4371 enabled_tc, vsi->seid);
4372 goto out;
4373 }
4374
4375 /* Update Queue Pairs Mapping for currently enabled UPs */
4376 ctxt.seid = vsi->seid;
4377 ctxt.pf_num = vsi->back->hw.pf_id;
4378 ctxt.vf_num = 0;
4379 ctxt.uplink_seid = vsi->uplink_seid;
4380 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4381 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4382
4383 /* Update the VSI after updating the VSI queue-mapping information */
4384 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4385 if (ret) {
4386 dev_info(&vsi->back->pdev->dev,
4387 "update vsi failed, aq_err=%d\n",
4388 vsi->back->hw.aq.asq_last_status);
4389 goto out;
4390 }
4391 /* update the local VSI info with updated queue map */
4392 i40e_vsi_update_queue_map(vsi, &ctxt);
4393 vsi->info.valid_sections = 0;
4394
4395 /* Update current VSI BW information */
4396 ret = i40e_vsi_get_bw_info(vsi);
4397 if (ret) {
4398 dev_info(&vsi->back->pdev->dev,
4399 "Failed updating vsi bw info, aq_err=%d\n",
4400 vsi->back->hw.aq.asq_last_status);
4401 goto out;
4402 }
4403
4404 /* Update the netdev TC setup */
4405 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4406out:
4407 return ret;
4408}
4409
4e3b35b0
NP
4410/**
4411 * i40e_veb_config_tc - Configure TCs for given VEB
4412 * @veb: given VEB
4413 * @enabled_tc: TC bitmap
4414 *
4415 * Configures given TC bitmap for VEB (switching) element
4416 **/
4417int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4418{
4419 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4420 struct i40e_pf *pf = veb->pf;
4421 int ret = 0;
4422 int i;
4423
4424 /* No TCs or already enabled TCs just return */
4425 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4426 return ret;
4427
4428 bw_data.tc_valid_bits = enabled_tc;
4429 /* bw_data.absolute_credits is not set (relative) */
4430
4431 /* Enable ETS TCs with equal BW Share for now */
4432 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4433 if (enabled_tc & (1 << i))
4434 bw_data.tc_bw_share_credits[i] = 1;
4435 }
4436
4437 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4438 &bw_data, NULL);
4439 if (ret) {
4440 dev_info(&pf->pdev->dev,
4441 "veb bw config failed, aq_err=%d\n",
4442 pf->hw.aq.asq_last_status);
4443 goto out;
4444 }
4445
4446 /* Update the BW information */
4447 ret = i40e_veb_get_bw_info(veb);
4448 if (ret) {
4449 dev_info(&pf->pdev->dev,
4450 "Failed getting veb bw config, aq_err=%d\n",
4451 pf->hw.aq.asq_last_status);
4452 }
4453
4454out:
4455 return ret;
4456}
4457
4458#ifdef CONFIG_I40E_DCB
4459/**
4460 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4461 * @pf: PF struct
4462 *
4463 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4464 * the caller has quiesced all the VSIs before calling
4465 * this function
4466 **/
4467static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4468{
4469 u8 tc_map = 0;
4470 int ret;
4471 u8 v;
4472
4473 /* Enable the TCs available on PF to all VEBs */
4474 tc_map = i40e_pf_get_tc_map(pf);
4475 for (v = 0; v < I40E_MAX_VEB; v++) {
4476 if (!pf->veb[v])
4477 continue;
4478 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4479 if (ret) {
4480 dev_info(&pf->pdev->dev,
4481 "Failed configuring TC for VEB seid=%d\n",
4482 pf->veb[v]->seid);
4484 /* Will try to configure as many components as possible */
4484 }
4485 }
4486
4487 /* Update each VSI */
505682cd 4488 for (v = 0; v < pf->num_alloc_vsi; v++) {
4e3b35b0
NP
4489 if (!pf->vsi[v])
4490 continue;
4491
4492 /* - Enable all TCs for the LAN VSI
38e00438
VD
4493#ifdef I40E_FCOE
4494 * - For FCoE VSI only enable the TC configured
4495 * as per the APP TLV
4496#endif
4e3b35b0
NP
4497 * - For all others keep them at TC0 for now
4498 */
4499 if (v == pf->lan_vsi)
4500 tc_map = i40e_pf_get_tc_map(pf);
4501 else
4502 tc_map = i40e_pf_get_default_tc(pf);
38e00438
VD
4503#ifdef I40E_FCOE
4504 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4505 tc_map = i40e_get_fcoe_tc_map(pf);
4506#endif /* #ifdef I40E_FCOE */
4e3b35b0
NP
4507
4508 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4509 if (ret) {
4510 dev_info(&pf->pdev->dev,
4511 "Failed configuring TC for VSI seid=%d\n",
4512 pf->vsi[v]->seid);
4513 /* Will try to configure as many components as possible */
4514 } else {
0672a091
NP
4515 /* Re-configure VSI vectors based on updated TC map */
4516 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4e3b35b0
NP
4517 if (pf->vsi[v]->netdev)
4518 i40e_dcbnl_set_all(pf->vsi[v]);
4519 }
4520 }
4521}
4522
2fd75f31
NP
4523/**
4524 * i40e_resume_port_tx - Resume port Tx
4525 * @pf: PF struct
4526 *
4527 * Resume a port's Tx and issue a PF reset in case of failure to
4528 * resume.
4529 **/
4530static int i40e_resume_port_tx(struct i40e_pf *pf)
4531{
4532 struct i40e_hw *hw = &pf->hw;
4533 int ret;
4534
4535 ret = i40e_aq_resume_port_tx(hw, NULL);
4536 if (ret) {
4537 dev_info(&pf->pdev->dev,
4538 "AQ command Resume Port Tx failed = %d\n",
4539 pf->hw.aq.asq_last_status);
4540 /* Schedule PF reset to recover */
4541 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4542 i40e_service_event_schedule(pf);
4543 }
4544
4545 return ret;
4546}
4547
4e3b35b0
NP
4548/**
4549 * i40e_init_pf_dcb - Initialize DCB configuration
4550 * @pf: PF being configured
4551 *
4552 * Query the current DCB configuration and cache it
4553 * in the hardware structure
4554 **/
4555static int i40e_init_pf_dcb(struct i40e_pf *pf)
4556{
4557 struct i40e_hw *hw = &pf->hw;
4558 int err = 0;
4559
4e3b35b0
NP
4560 /* Get the initial DCB configuration */
4561 err = i40e_init_dcb(hw);
4562 if (!err) {
4563 /* Device/Function is not DCBX capable */
4564 if ((!hw->func_caps.dcb) ||
4565 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4566 dev_info(&pf->pdev->dev,
4567 "DCBX offload is not supported or is disabled for this PF.\n");
4568
4569 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4570 goto out;
4571
4572 } else {
4573 /* When status is not DISABLED then DCBX is in FW */
4574 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4575 DCB_CAP_DCBX_VER_IEEE;
4d9b6043
NP
4576
4577 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4578 /* Enable DCB tagging only when more than one TC */
4579 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4580 pf->flags |= I40E_FLAG_DCB_ENABLED;
9fa61dd2
NP
4581 dev_dbg(&pf->pdev->dev,
4582 "DCBX offload is supported for this PF.\n");
4e3b35b0 4583 }
014269ff 4584 } else {
aebfc816
SN
4585 dev_info(&pf->pdev->dev,
4586 "AQ Querying DCB configuration failed: aq_err %d\n",
014269ff 4587 pf->hw.aq.asq_last_status);
4e3b35b0
NP
4588 }
4589
4590out:
4591 return err;
4592}
4593#endif /* CONFIG_I40E_DCB */
cf05ed08
JB
4594#define SPEED_SIZE 14
4595#define FC_SIZE 8
4596/**
4597 * i40e_print_link_message - print link up or down
4598 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false if down
4599 */
4600static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4601{
4602 char speed[SPEED_SIZE] = "Unknown";
4603 char fc[FC_SIZE] = "RX/TX";
4604
4605 if (!isup) {
4606 netdev_info(vsi->netdev, "NIC Link is Down\n");
4607 return;
4608 }
4609
148c2d80
GR
4610 /* Warn user if link speed on NPAR enabled partition is not at
4611 * least 10GB
4612 */
4613 if (vsi->back->hw.func_caps.npar_enable &&
4614 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4615 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4616 netdev_warn(vsi->netdev,
4617 "The partition detected link speed that is less than 10Gbps\n");
4618
cf05ed08
JB
4619 switch (vsi->back->hw.phy.link_info.link_speed) {
4620 case I40E_LINK_SPEED_40GB:
35a7d804 4621 strlcpy(speed, "40 Gbps", SPEED_SIZE);
cf05ed08
JB
4622 break;
4623 case I40E_LINK_SPEED_10GB:
35a7d804 4624 strlcpy(speed, "10 Gbps", SPEED_SIZE);
cf05ed08
JB
4625 break;
4626 case I40E_LINK_SPEED_1GB:
35a7d804 4627 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
cf05ed08 4628 break;
5960d33f
MW
4629 case I40E_LINK_SPEED_100MB:
4630 strncpy(speed, "100 Mbps", SPEED_SIZE);
4631 break;
cf05ed08
JB
4632 default:
4633 break;
4634 }
4635
4636 switch (vsi->back->hw.fc.current_mode) {
4637 case I40E_FC_FULL:
35a7d804 4638 strlcpy(fc, "RX/TX", FC_SIZE);
cf05ed08
JB
4639 break;
4640 case I40E_FC_TX_PAUSE:
35a7d804 4641 strlcpy(fc, "TX", FC_SIZE);
cf05ed08
JB
4642 break;
4643 case I40E_FC_RX_PAUSE:
35a7d804 4644 strlcpy(fc, "RX", FC_SIZE);
cf05ed08
JB
4645 break;
4646 default:
35a7d804 4647 strlcpy(fc, "None", FC_SIZE);
cf05ed08
JB
4648 break;
4649 }
4650
4651 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4652 speed, fc);
4653}
4e3b35b0 4654
41c445ff
JB
4655/**
4656 * i40e_up_complete - Finish the last steps of bringing up a connection
4657 * @vsi: the VSI being configured
4658 **/
4659static int i40e_up_complete(struct i40e_vsi *vsi)
4660{
4661 struct i40e_pf *pf = vsi->back;
4662 int err;
4663
4664 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4665 i40e_vsi_configure_msix(vsi);
4666 else
4667 i40e_configure_msi_and_legacy(vsi);
4668
4669 /* start rings */
4670 err = i40e_vsi_control_rings(vsi, true);
4671 if (err)
4672 return err;
4673
4674 clear_bit(__I40E_DOWN, &vsi->state);
4675 i40e_napi_enable_all(vsi);
4676 i40e_vsi_enable_irq(vsi);
4677
4678 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4679 (vsi->netdev)) {
cf05ed08 4680 i40e_print_link_message(vsi, true);
41c445ff
JB
4681 netif_tx_start_all_queues(vsi->netdev);
4682 netif_carrier_on(vsi->netdev);
6d779b41 4683 } else if (vsi->netdev) {
cf05ed08 4684 i40e_print_link_message(vsi, false);
7b592f61
CW
4685 /* need to check for qualified module here */
4686 if ((pf->hw.phy.link_info.link_info &
4687 I40E_AQ_MEDIA_AVAILABLE) &&
4688 (!(pf->hw.phy.link_info.an_info &
4689 I40E_AQ_QUALIFIED_MODULE)))
4690 netdev_err(vsi->netdev,
4691 "the driver failed to link because an unqualified module was detected.");
41c445ff 4692 }
ca64fa4e
ASJ
4693
4694 /* replay FDIR SB filters */
1e1be8f6
ASJ
4695 if (vsi->type == I40E_VSI_FDIR) {
4696 /* reset fd counters */
4697 pf->fd_add_err = pf->fd_atr_cnt = 0;
4698 if (pf->fd_tcp_rule > 0) {
4699 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4700 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4701 pf->fd_tcp_rule = 0;
4702 }
ca64fa4e 4703 i40e_fdir_filter_restore(vsi);
1e1be8f6 4704 }
41c445ff
JB
4705 i40e_service_event_schedule(pf);
4706
4707 return 0;
4708}
4709
4710/**
4711 * i40e_vsi_reinit_locked - Reset the VSI
4712 * @vsi: the VSI being configured
4713 *
4714 * Rebuild the ring structs after some configuration
4715 * has changed, e.g. MTU size.
4716 **/
4717static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4718{
4719 struct i40e_pf *pf = vsi->back;
4720
4721 WARN_ON(in_interrupt());
4722 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4723 usleep_range(1000, 2000);
4724 i40e_down(vsi);
4725
4726 /* Give a VF some time to respond to the reset. The
4727 * two second wait is based upon the watchdog cycle in
4728 * the VF driver.
4729 */
4730 if (vsi->type == I40E_VSI_SRIOV)
4731 msleep(2000);
4732 i40e_up(vsi);
4733 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4734}
4735
4736/**
4737 * i40e_up - Bring the connection back up after being down
4738 * @vsi: the VSI being configured
4739 **/
4740int i40e_up(struct i40e_vsi *vsi)
4741{
4742 int err;
4743
4744 err = i40e_vsi_configure(vsi);
4745 if (!err)
4746 err = i40e_up_complete(vsi);
4747
4748 return err;
4749}
4750
4751/**
4752 * i40e_down - Shutdown the connection processing
4753 * @vsi: the VSI being stopped
4754 **/
4755void i40e_down(struct i40e_vsi *vsi)
4756{
4757 int i;
4758
4759 /* It is assumed that the caller of this function
4760 * sets the vsi->state __I40E_DOWN bit.
4761 */
4762 if (vsi->netdev) {
4763 netif_carrier_off(vsi->netdev);
4764 netif_tx_disable(vsi->netdev);
4765 }
4766 i40e_vsi_disable_irq(vsi);
4767 i40e_vsi_control_rings(vsi, false);
4768 i40e_napi_disable_all(vsi);
4769
4770 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
4771 i40e_clean_tx_ring(vsi->tx_rings[i]);
4772 i40e_clean_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
4773 }
4774}
4775
4776/**
4777 * i40e_setup_tc - configure multiple traffic classes
4778 * @netdev: net device to configure
4779 * @tc: number of traffic classes to enable
4780 **/
38e00438
VD
4781#ifdef I40E_FCOE
4782int i40e_setup_tc(struct net_device *netdev, u8 tc)
4783#else
41c445ff 4784static int i40e_setup_tc(struct net_device *netdev, u8 tc)
38e00438 4785#endif
41c445ff
JB
4786{
4787 struct i40e_netdev_priv *np = netdev_priv(netdev);
4788 struct i40e_vsi *vsi = np->vsi;
4789 struct i40e_pf *pf = vsi->back;
4790 u8 enabled_tc = 0;
4791 int ret = -EINVAL;
4792 int i;
4793
4794 /* Check if DCB enabled to continue */
4795 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4796 netdev_info(netdev, "DCB is not enabled for adapter\n");
4797 goto exit;
4798 }
4799
4800 /* Check if MFP enabled */
4801 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4802 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4803 goto exit;
4804 }
4805
4806 /* Check whether tc count is within enabled limit */
4807 if (tc > i40e_pf_get_num_tc(pf)) {
4808 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4809 goto exit;
4810 }
4811
4812 /* Generate TC map for number of tc requested */
4813 for (i = 0; i < tc; i++)
4814 enabled_tc |= (1 << i);
4815
4816 /* Requesting same TC configuration as already enabled */
4817 if (enabled_tc == vsi->tc_config.enabled_tc)
4818 return 0;
4819
4820 /* Quiesce VSI queues */
4821 i40e_quiesce_vsi(vsi);
4822
4823 /* Configure VSI for enabled TCs */
4824 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4825 if (ret) {
4826 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4827 vsi->seid);
4828 goto exit;
4829 }
4830
4831 /* Unquiesce VSI */
4832 i40e_unquiesce_vsi(vsi);
4833
4834exit:
4835 return ret;
4836}
4837
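/* For reference, this handler is normally reached from user space
 * through the mqprio qdisc, e.g. (hypothetical interface name):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *	map 0 1 2 3 0 1 2 3 0 0 0 0 0 0 0 0 hw 1
 *
 * which lands here via ndo_setup_tc with tc = 4.
 */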
4838/**
4839 * i40e_open - Called when a network interface is made active
4840 * @netdev: network interface device structure
4841 *
4842 * The open entry point is called when a network interface is made
4843 * active by the system (IFF_UP). At this point all resources needed
4844 * for transmit and receive operations are allocated, the interrupt
4845 * handler is registered with the OS, the netdev watchdog subtask is
4846 * enabled, and the stack is notified that the interface is ready.
4847 *
4848 * Returns 0 on success, negative value on failure
4849 **/
38e00438
VD
4850#ifdef I40E_FCOE
4851int i40e_open(struct net_device *netdev)
4852#else
41c445ff 4853static int i40e_open(struct net_device *netdev)
38e00438 4854#endif
41c445ff
JB
4855{
4856 struct i40e_netdev_priv *np = netdev_priv(netdev);
4857 struct i40e_vsi *vsi = np->vsi;
4858 struct i40e_pf *pf = vsi->back;
41c445ff
JB
4859 int err;
4860
4eb3f768
SN
4861 /* disallow open during test or if eeprom is broken */
4862 if (test_bit(__I40E_TESTING, &pf->state) ||
4863 test_bit(__I40E_BAD_EEPROM, &pf->state))
41c445ff
JB
4864 return -EBUSY;
4865
4866 netif_carrier_off(netdev);
4867
6c167f58
EK
4868 err = i40e_vsi_open(vsi);
4869 if (err)
4870 return err;
4871
059dab69
JB
4872 /* configure global TSO hardware offload settings */
4873 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4874 TCP_FLAG_FIN) >> 16);
4875 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4876 TCP_FLAG_FIN |
4877 TCP_FLAG_CWR) >> 16);
4878 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4879
6c167f58
EK
4880#ifdef CONFIG_I40E_VXLAN
4881 vxlan_get_rx_port(netdev);
4882#endif
4883
4884 return 0;
4885}
4886
4887/**
4888 * i40e_vsi_open - Bring up a VSI
4889 * @vsi: the VSI to open
4890 *
4891 * Finish initialization of the VSI.
4892 *
4893 * Returns 0 on success, negative value on failure
4894 **/
4895int i40e_vsi_open(struct i40e_vsi *vsi)
4896{
4897 struct i40e_pf *pf = vsi->back;
b294ac70 4898 char int_name[I40E_INT_NAME_STR_LEN];
6c167f58
EK
4899 int err;
4900
41c445ff
JB
4901 /* allocate descriptors */
4902 err = i40e_vsi_setup_tx_resources(vsi);
4903 if (err)
4904 goto err_setup_tx;
4905 err = i40e_vsi_setup_rx_resources(vsi);
4906 if (err)
4907 goto err_setup_rx;
4908
4909 err = i40e_vsi_configure(vsi);
4910 if (err)
4911 goto err_setup_rx;
4912
c22e3c6c
SN
4913 if (vsi->netdev) {
4914 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4915 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4916 err = i40e_vsi_request_irq(vsi, int_name);
4917 if (err)
4918 goto err_setup_rx;
41c445ff 4919
c22e3c6c
SN
4920 /* Notify the stack of the actual queue counts. */
4921 err = netif_set_real_num_tx_queues(vsi->netdev,
4922 vsi->num_queue_pairs);
4923 if (err)
4924 goto err_set_queues;
25946ddb 4925
c22e3c6c
SN
4926 err = netif_set_real_num_rx_queues(vsi->netdev,
4927 vsi->num_queue_pairs);
4928 if (err)
4929 goto err_set_queues;
8a9eb7d3
SN
4930
4931 } else if (vsi->type == I40E_VSI_FDIR) {
e240f674 4932 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
b2008cbf
CW
4933 dev_driver_string(&pf->pdev->dev),
4934 dev_name(&pf->pdev->dev));
8a9eb7d3 4935 err = i40e_vsi_request_irq(vsi, int_name);
b2008cbf 4936
c22e3c6c 4937 } else {
ce9ccb17 4938 err = -EINVAL;
6c167f58
EK
4939 goto err_setup_rx;
4940 }
25946ddb 4941
41c445ff
JB
4942 err = i40e_up_complete(vsi);
4943 if (err)
4944 goto err_up_complete;
4945
41c445ff
JB
4946 return 0;
4947
4948err_up_complete:
4949 i40e_down(vsi);
25946ddb 4950err_set_queues:
41c445ff
JB
4951 i40e_vsi_free_irq(vsi);
4952err_setup_rx:
4953 i40e_vsi_free_rx_resources(vsi);
4954err_setup_tx:
4955 i40e_vsi_free_tx_resources(vsi);
4956 if (vsi == pf->vsi[pf->lan_vsi])
4957 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4958
4959 return err;
4960}
4961
17a73f6b
JG
4962/**
4963 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4964 * @pf: Pointer to the PF
4965 *
4966 * This function destroys the hlist where all the Flow Director
4967 * filters were saved.
4968 **/
4969static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4970{
4971 struct i40e_fdir_filter *filter;
4972 struct hlist_node *node2;
4973
4974 hlist_for_each_entry_safe(filter, node2,
4975 &pf->fdir_filter_list, fdir_node) {
4976 hlist_del(&filter->fdir_node);
4977 kfree(filter);
4978 }
4979 pf->fdir_pf_active_filters = 0;
4980}
4981
41c445ff
JB
4982/**
4983 * i40e_close - Disables a network interface
4984 * @netdev: network interface device structure
4985 *
4986 * The close entry point is called when an interface is de-activated
4987 * by the OS. The hardware is still under the driver's control, but
4988 * this netdev interface is disabled.
4989 *
4990 * Returns 0, this is not allowed to fail
4991 **/
38e00438
VD
4992#ifdef I40E_FCOE
4993int i40e_close(struct net_device *netdev)
4994#else
41c445ff 4995static int i40e_close(struct net_device *netdev)
38e00438 4996#endif
41c445ff
JB
4997{
4998 struct i40e_netdev_priv *np = netdev_priv(netdev);
4999 struct i40e_vsi *vsi = np->vsi;
5000
90ef8d47 5001 i40e_vsi_close(vsi);
41c445ff
JB
5002
5003 return 0;
5004}
5005
5006/**
5007 * i40e_do_reset - Start a PF or Core Reset sequence
5008 * @pf: board private structure
5009 * @reset_flags: which reset is requested
5010 *
5011 * The essential difference in resets is that the PF Reset
5012 * doesn't clear the packet buffers, doesn't reset the PE
5013 * firmware, and doesn't bother the other PFs on the chip.
5014 **/
5015void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5016{
5017 u32 val;
5018
5019 WARN_ON(in_interrupt());
5020
263fc48f
MW
5021 if (i40e_check_asq_alive(&pf->hw))
5022 i40e_vc_notify_reset(pf);
5023
41c445ff
JB
5024 /* do the biggest reset indicated */
5025 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
5026
5027 /* Request a Global Reset
5028 *
5029 * This will start the chip's countdown to the actual full
5030 * chip reset event, and a warning interrupt to be sent
5031 * to all PFs, including the requestor. Our handler
5032 * for the warning interrupt will deal with the shutdown
5033 * and recovery of the switch setup.
5034 */
69bfb110 5035 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
41c445ff
JB
5036 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5037 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5038 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5039
5040 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
5041
5042 /* Request a Core Reset
5043 *
5044 * Same as Global Reset, except does *not* include the MAC/PHY
5045 */
69bfb110 5046 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
41c445ff
JB
5047 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5048 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5049 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5050 i40e_flush(&pf->hw);
5051
7823fe34
SN
5052 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
5053
5054 /* Request a Firmware Reset
5055 *
5056 * Same as Global reset, plus restarting the
5057 * embedded firmware engine.
5058 */
5059 /* enable EMP Reset */
5060 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
5061 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
5062 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
5063
5064 /* force the reset */
5065 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5066 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
5067 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5068 i40e_flush(&pf->hw);
5069
41c445ff
JB
5070 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
5071
5072 /* Request a PF Reset
5073 *
5074 * Resets only the PF-specific registers
5075 *
5076 * This goes directly to the tear-down and rebuild of
5077 * the switch, since we need to do all the recovery as
5078 * for the Core Reset.
5079 */
69bfb110 5080 dev_dbg(&pf->pdev->dev, "PFR requested\n");
41c445ff
JB
5081 i40e_handle_reset_warning(pf);
5082
5083 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
5084 int v;
5085
5086 /* Find the VSI(s) that requested a re-init */
5087 dev_info(&pf->pdev->dev,
5088 "VSI reinit requested\n");
505682cd 5089 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
5090 struct i40e_vsi *vsi = pf->vsi[v];
5091 if (vsi != NULL &&
5092 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5093 i40e_vsi_reinit_locked(pf->vsi[v]);
5094 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5095 }
5096 }
5097
b5d06f05
NP
5098 /* no further action needed, so return now */
5099 return;
5100 } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
5101 int v;
5102
5103 /* Find the VSI(s) that need to be brought down */
5104 dev_info(&pf->pdev->dev, "VSI down requested\n");
5105 for (v = 0; v < pf->num_alloc_vsi; v++) {
5106 struct i40e_vsi *vsi = pf->vsi[v];
5107 if (vsi != NULL &&
5108 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5109 set_bit(__I40E_DOWN, &vsi->state);
5110 i40e_down(vsi);
5111 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5112 }
5113 }
5114
41c445ff
JB
5115 /* no further action needed, so return now */
5116 return;
5117 } else {
5118 dev_info(&pf->pdev->dev,
5119 "bad reset request 0x%08x\n", reset_flags);
5120 return;
5121 }
5122}
5123
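/* Informal scope summary of the resets above: GLOBR resets the whole
 * chip including MAC/PHY, CORER is GLOBR minus MAC/PHY, EMPR is
 * GLOBR plus a restart of the embedded firmware engine, and PFR
 * touches only this PF's registers. A typical in-driver request:
 *
 *   i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 */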
4e3b35b0
NP
5124#ifdef CONFIG_I40E_DCB
5125/**
5126 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5127 * @pf: board private structure
5128 * @old_cfg: current DCB config
5129 * @new_cfg: new DCB config
5130 **/
5131bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5132 struct i40e_dcbx_config *old_cfg,
5133 struct i40e_dcbx_config *new_cfg)
5134{
5135 bool need_reconfig = false;
5136
5137 /* Check if ETS configuration has changed */
5138 if (memcmp(&new_cfg->etscfg,
5139 &old_cfg->etscfg,
5140 sizeof(new_cfg->etscfg))) {
5141 /* If Priority Table has changed reconfig is needed */
5142 if (memcmp(&new_cfg->etscfg.prioritytable,
5143 &old_cfg->etscfg.prioritytable,
5144 sizeof(new_cfg->etscfg.prioritytable))) {
5145 need_reconfig = true;
69bfb110 5146 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4e3b35b0
NP
5147 }
5148
5149 if (memcmp(&new_cfg->etscfg.tcbwtable,
5150 &old_cfg->etscfg.tcbwtable,
5151 sizeof(new_cfg->etscfg.tcbwtable)))
69bfb110 5152 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4e3b35b0
NP
5153
5154 if (memcmp(&new_cfg->etscfg.tsatable,
5155 &old_cfg->etscfg.tsatable,
5156 sizeof(new_cfg->etscfg.tsatable)))
69bfb110 5157 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4e3b35b0
NP
5158 }
5159
5160 /* Check if PFC configuration has changed */
5161 if (memcmp(&new_cfg->pfc,
5162 &old_cfg->pfc,
5163 sizeof(new_cfg->pfc))) {
5164 need_reconfig = true;
69bfb110 5165 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4e3b35b0
NP
5166 }
5167
5168 /* Check if APP Table has changed */
5169 if (memcmp(&new_cfg->app,
5170 &old_cfg->app,
3d9667a9 5171 sizeof(new_cfg->app))) {
4e3b35b0 5172 need_reconfig = true;
69bfb110 5173 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
3d9667a9 5174 }
4e3b35b0 5175
9fa61dd2
NP
5176 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5177 need_reconfig);
4e3b35b0
NP
5178 return need_reconfig;
5179}
5180
5181/**
5182 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5183 * @pf: board private structure
5184 * @e: event info posted on ARQ
5185 **/
5186static int i40e_handle_lldp_event(struct i40e_pf *pf,
5187 struct i40e_arq_event_info *e)
5188{
5189 struct i40e_aqc_lldp_get_mib *mib =
5190 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5191 struct i40e_hw *hw = &pf->hw;
5192 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
5193 struct i40e_dcbx_config tmp_dcbx_cfg;
5194 bool need_reconfig = false;
5195 int ret = 0;
5196 u8 type;
5197
4d9b6043
NP
5198 /* Not DCB capable or capability disabled */
5199 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5200 return ret;
5201
4e3b35b0
NP
5202 /* Ignore if event is not for Nearest Bridge */
5203 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5204 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9fa61dd2
NP
5205 dev_dbg(&pf->pdev->dev,
5206 "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
4e3b35b0
NP
5207 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5208 return ret;
5209
5210 /* Check MIB Type and return if event for Remote MIB update */
5211 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9fa61dd2
NP
5212 dev_dbg(&pf->pdev->dev,
5213 "%s: LLDP event mib type %s\n", __func__,
5214 type ? "remote" : "local");
4e3b35b0
NP
5215 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5216 /* Update the remote cached instance and return */
5217 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5218 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5219 &hw->remote_dcbx_config);
5220 goto exit;
5221 }
5222
4e3b35b0 5223 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
9fa61dd2
NP
5224 /* Store the old configuration */
5225 tmp_dcbx_cfg = *dcbx_cfg;
5226
5227 /* Get updated DCBX data from firmware */
5228 ret = i40e_get_dcb_config(&pf->hw);
4e3b35b0 5229 if (ret) {
9fa61dd2 5230 dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
4e3b35b0
NP
5231 goto exit;
5232 }
5233
5234 /* No change detected in DCBX configs */
5235 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
69bfb110 5236 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4e3b35b0
NP
5237 goto exit;
5238 }
5239
9fa61dd2 5240 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
4e3b35b0 5241
9fa61dd2 5242 i40e_dcbnl_flush_apps(pf, dcbx_cfg);
4e3b35b0
NP
5243
5244 if (!need_reconfig)
5245 goto exit;
5246
4d9b6043
NP
5247 /* Enable DCB tagging only when more than one TC */
5248 if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
5249 pf->flags |= I40E_FLAG_DCB_ENABLED;
5250 else
5251 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5252
69129dc3 5253 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
4e3b35b0
NP
5254 /* Reconfiguration needed quiesce all VSIs */
5255 i40e_pf_quiesce_all_vsi(pf);
5256
5257 /* Changes in configuration update VEB/VSI */
5258 i40e_dcb_reconfigure(pf);
5259
2fd75f31
NP
5260 ret = i40e_resume_port_tx(pf);
5261
69129dc3 5262 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
2fd75f31 5263 /* In case of error no point in resuming VSIs */
69129dc3
NP
5264 if (ret)
5265 goto exit;
5266
5267 /* Wait for the PF's Tx queues to be disabled */
5268 ret = i40e_pf_wait_txq_disabled(pf);
2fd75f31
NP
5269 if (!ret)
5270 i40e_pf_unquiesce_all_vsi(pf);
4e3b35b0
NP
5271exit:
5272 return ret;
5273}
5274#endif /* CONFIG_I40E_DCB */
5275
23326186
ASJ
5276/**
5277 * i40e_do_reset_safe - Protected reset path for userland calls.
5278 * @pf: board private structure
5279 * @reset_flags: which reset is requested
5280 *
5281 **/
5282void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5283{
5284 rtnl_lock();
5285 i40e_do_reset(pf, reset_flags);
5286 rtnl_unlock();
5287}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get the count of total FD filters programmed
 * @pf: board private structure
 **/
int i40e_get_current_fd_count(struct i40e_pf *pf)
{
	int val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	u32 fcnt_prog, fcnt_avail;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if FD SB or ATR was auto-disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
	/* Wait for some more space to be available to turn on ATR */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	int flush_wait_retry = 50;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if (time_after(jiffies, pf->fd_flush_timestamp +
				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
		set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		pf->fd_flush_timestamp = jiffies;
		pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		/* flush all filters */
		wr32(&pf->hw, I40E_PFQF_CTL_1,
		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
		i40e_flush(&pf->hw);
		pf->fd_flush_cnt++;
		pf->fd_add_err = 0;
		do {
			/* Check FD flush status every 5-6msec */
			usleep_range(5000, 6000);
			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
				break;
		} while (flush_wait_retry--);
		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
		} else {
			/* replay sideband filters */
			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);

			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
		}
	}
}

/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
int i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting makes sure we don't trigger a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
	    (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
	    (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	new_link = i40e_get_link_status(&pf->hw);
	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}

/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
				      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
				      I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
			} else {
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
					   I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
					   I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
					   I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	i40e_check_hang_subtask(pf);
	i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	/* Update the stats for the active switching components */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i])
			i40e_update_veb_stats(pf->veb[i]);

	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* check for unqualified module, if link is down */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		dev_err(&pf->pdev->dev,
			"The driver failed to link because an unqualified module was detected.\n");
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, &pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, &pf->state);
	}
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* Enable LB mode for the main VSI now that it is on a VEB */
	i40e_enable_pf_switch_lb(pf);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
	    (pf->hw.aq.fw_maj_ver < 2)) {
		pf->hw.func_caps.num_msix_vectors++;
		pf->hw.func_caps.num_msix_vectors_vf++;
	}

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}

static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			vsi = pf->vsi[i];
			break;
		}
	}

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	int i;

	i40e_fdir_filter_exit(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a PF reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
		i40e_verify_eeprom(pf);
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	ret = i40e_init_pf_fcoe(pf);
	if (ret)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	msleep(75);
	ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}

/**
 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the pf structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 filter_index;
	__be16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_vxlan_bitmap & (1 << i)) {
			pf->pending_vxlan_bitmap &= ~(1 << i);
			port = pf->vxlan_ports[i];
			ret = port ?
			      i40e_aq_add_udp_tunnel(hw, ntohs(port),
						     I40E_AQC_TUNNEL_TYPE_VXLAN,
						     &filter_index, NULL)
			      : i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
					 port ? "adding" : "deleting",
					 ntohs(port), i);

				pf->vxlan_ports[i] = 0;
			} else {
				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
					 port ? "Added" : "Deleted",
					 ntohs(port), port ? i : filter_index);
			}
		}
	}
}

#endif
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
	i40e_sync_vxlan_filters_subtask(pf);
#endif
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the vsi being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the vsi being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		vsi->tx_rings[i] = tx_ring;

		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
			set_ring_16byte_desc_enabled(rx_ring);
		else
			clear_ring_16byte_desc_enabled(rx_ring);
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}

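/* Pairing note: each queue pair's Tx and Rx ring structs come from a single
 * kzalloc of two i40e_ring entries, with rx_ring = &tx_ring[1] pointing at
 * the second entry. That is why i40e_vsi_clear_rings() frees only
 * tx_rings[i]: the Rx ring is part of the same allocation.
 */
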
6886/**
6887 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6888 * @pf: board private structure
6889 * @vectors: the number of MSI-X vectors to request
6890 *
6891 * Returns the number of vectors reserved, or error
6892 **/
6893static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6894{
7b37f376
AG
6895 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6896 I40E_MIN_MSIX, vectors);
6897 if (vectors < 0) {
41c445ff 6898 dev_info(&pf->pdev->dev,
7b37f376 6899 "MSI-X vector reservation failed: %d\n", vectors);
41c445ff
JB
6900 vectors = 0;
6901 }
6902
6903 return vectors;
6904}
6905
6906/**
6907 * i40e_init_msix - Setup the MSIX capability
6908 * @pf: board private structure
6909 *
6910 * Work with the OS to set up the MSIX vectors needed.
6911 *
6912 * Returns 0 on success, negative on failure
6913 **/
6914static int i40e_init_msix(struct i40e_pf *pf)
6915{
6916 i40e_status err = 0;
6917 struct i40e_hw *hw = &pf->hw;
c135b0de 6918 int other_vecs = 0;
41c445ff
JB
6919 int v_budget, i;
6920 int vec;
6921
6922 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6923 return -ENODEV;
6924
6925 /* The number of vectors we'll request will be comprised of:
6926 * - Add 1 for "other" cause for Admin Queue events, etc.
6927 * - The number of LAN queue pairs
f8ff1464
ASJ
6928 * - Queues being used for RSS.
6929 * We don't need as many as max_rss_size vectors.
6930 * use rss_size instead in the calculation since that
 * is governed by number of cpus in the system.
 * - assumes symmetric Tx/Rx pairing
 * - The number of VMDq pairs
#ifdef I40E_FCOE
 * - The number of FCOE qps.
#endif
 * Once we count this up, try the request.
 *
 * If we can't get what we want, we'll simplify to nearly nothing
 * and try again. If that still fails, we punt.
 */
	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	other_vecs = 1;
	other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
		other_vecs++;

	/* Scale down if necessary, and the rings will share vectors */
	pf->num_lan_msix = min_t(int, pf->num_lan_msix,
				 (hw->func_caps.num_msix_vectors - other_vecs));
	v_budget = pf->num_lan_msix + other_vecs;

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		pf->num_fcoe_msix = pf->num_fcoe_qps;
		v_budget += pf->num_fcoe_msix;
	}
#endif

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);

	if (vec != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (vec < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* reserve the misc vector */
		vec--;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return err;
}
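
/* Illustrative sketch (not driver code): the vec != v_budget fallback above,
 * restated as a pure function over the reduced vector count. The struct and
 * function names are hypothetical, the FCoE branches are omitted, and
 * I40E_DEFAULT_NUM_VMDQ_VSI is taken as a plain parameter - this only
 * restates the partitioning policy in isolation; the real code mutates
 * pf->... fields in place as shown above.
 */
struct i40e_vec_split_sketch {
	int lan_msix;
	int vmdq_msix;
	int vmdq_vsis;
};

static void i40e_split_vectors_sketch(int vec, int num_lan_qps,
				      int default_vmdq_vsis,
				      struct i40e_vec_split_sketch *s)
{
	vec--;			/* reserve the misc vector */
	s->vmdq_msix = 1;	/* each VMDq VSI falls back to one vector */
	s->vmdq_vsis = 1;

	switch (vec) {
	case 2:
		s->lan_msix = 1;
		break;
	case 3:
		s->lan_msix = 2;
		break;
	default:
		/* split roughly in half, capped by the LAN queue count */
		s->lan_msix = min(vec / 2, num_lan_qps);
		s->vmdq_vsis = min(vec - s->lan_msix, default_vmdq_vsis);
		break;
	}
}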

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
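
/* Illustrative sketch (not driver code): the poll routine registered with
 * netif_napi_add() above follows the standard NAPI contract for this kernel
 * generation - process at most `budget` packets per call, and complete NAPI
 * only once the rings are drained. Skeleton form (names hypothetical):
 */
static int i40e_napi_poll_skeleton(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... clean the Tx ring, then receive up to `budget` frames,
	 * incrementing work_done per frame ...
	 */
	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable this queue's interrupt here */
	}
	return work_done;
}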

/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int v_idx, num_q_vectors;
	int err;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED |
#endif
				       I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FD_SB_ENABLED |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		err = pci_enable_msi(pf->pdev);
		if (err) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* track first vector for misc interrupts */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
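
/* Illustrative sketch (not driver code): the function above implements a
 * fallback ladder - MSI-X first, then MSI, then legacy INTx - stripping the
 * feature flags that depend on the richer mode at each step. The decision
 * itself reduces to (enum and names hypothetical):
 */
enum i40e_irq_mode_sketch { IRQ_MSIX_SK, IRQ_MSI_SK, IRQ_LEGACY_SK };

static enum i40e_irq_mode_sketch
i40e_pick_irq_mode_sketch(bool msix_ok, bool msi_ok)
{
	if (msix_ok)
		return IRQ_MSIX_SK;	/* full multi-queue operation */
	if (msi_ok)
		return IRQ_MSI_SK;	/* single vector, LAN VSI only */
	return IRQ_LEGACY_SK;		/* shared line interrupt */
}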

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;
	u32 reg_val;

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Check capability, then set table size and register per hw expectation */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	if (hw->func_caps.rss_table_size == 512) {
		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
		pf->rss_table_size = 512;
	} else {
		pf->rss_table_size = 128;
		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
	}
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
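
/* Illustrative sketch (not driver code): the 8-bit sliding window above
 * packs four consecutive LUT entries into one 32-bit HLUT register, with the
 * most recent entry in the low byte. For rss_size = 4 the first register
 * written (i = 0..3, j = 0..3) is:
 *
 *   lut = ((((0 << 8 | 0) << 8 | 1) << 8 | 2) << 8) | 3 = 0x00010203
 *
 * The packing for one register, restated standalone (name hypothetical):
 */
static u32 i40e_pack_hlut_word_sketch(const u8 entries[4], u8 entry_width)
{
	u32 lut = 0;
	int i;

	for (i = 0; i < 4; i++)
		lut = (lut << 8) | (entries[i] & ((1 << entry_width) - 1));
	return lut;
}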

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != pf->rss_size) {
		i40e_prep_for_reset(pf);

		pf->rss_size = queue_count;

		i40e_reset_and_rebuild(pf, true);
		i40e_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
	return pf->rss_size;
}
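
/* Illustrative sketch (not driver code): a caller (likely the ethtool
 * channel-count path) should treat the return value as the queue count
 * actually granted, since the request is clamped to pf->rss_size_max.
 * Name and error-mapping below are hypothetical:
 */
static int i40e_request_rss_queues_sketch(struct i40e_pf *pf, int requested)
{
	int granted = i40e_reconfig_rss_queues(pf, requested);

	if (!granted)
		return -EINVAL;		/* RSS is not enabled */
	return granted;			/* may be less than requested */
}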

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED    |
		    I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size = 1;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Setup a counter for fd_atr per pf */
		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			/* Setup a counter for fd_sb per pf */
			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef I40E_FCOE
	err = i40e_init_pf_fcoe(pf);
	if (err)
		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		kfree(pf->qp_pile);
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
	pf->irq_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}

/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return 0;
}

#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_get_vxlan_port_idx - Look up a UDP port that may be offloaded for Rx
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
			return i;
	}

	return i;
}

/**
 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: New UDP port number that VXLAN started listening to
 **/
static void i40e_add_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 next_idx;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_vxlan_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[next_idx] = port;
	pf->pending_vxlan_bitmap |= (1 << next_idx);

	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
}
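
/* Illustrative sketch (not driver code): elsewhere, the driver's service
 * task consumes pending_vxlan_bitmap once I40E_FLAG_VXLAN_FILTER_SYNC is
 * set. The general drain pattern for such a pending bitmap looks like the
 * following; the function name is hypothetical and the actual AdminQ
 * programming is elided:
 */
static void i40e_drain_vxlan_pending_sketch(struct i40e_pf *pf)
{
	u8 idx;

	for (idx = 0; idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS; idx++) {
		if (!(pf->pending_vxlan_bitmap & (1 << idx)))
			continue;
		pf->pending_vxlan_bitmap &= ~(1 << idx);
		if (pf->vxlan_ports[idx]) {
			/* non-zero slot: program the new port into HW */
		} else {
			/* zeroed slot: remove the old port from HW */
		}
	}
}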

/**
 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
 * @netdev: This physical port's netdev
 * @sa_family: Socket Family that VXLAN is notifying us about
 * @port: UDP port number that VXLAN stopped listening to
 **/
static void i40e_del_vxlan_port(struct net_device *netdev,
				sa_family_t sa_family, __be16 port)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 idx;

	if (sa_family == AF_INET6)
		return;

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		/* if port exists, set it to 0 (mark for deletion)
		 * and make it pending
		 */
		pf->vxlan_ports[idx] = 0;

		pf->pending_vxlan_bitmap |= (1 << idx);

		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
	} else {
		netdev_warn(netdev, "Port %d was not found, not deleting\n",
			    ntohs(port));
	}
}

#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID (not supported; must be 0)
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
				   NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_TSO;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CSUM	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get pf vsi config, err %d, aq_err %d\n",
				 ret, pf->hw.aq.asq_last_status);
			return -ENOENT;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, aq_err=%d\n",
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
					 enabled_tc, ret,
					 pf->hw.aq.asq_last_status);
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = 0;
		ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;     /* regular data port */
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, aq_err=%d\n",
				 vsi->back->hw.aq.asq_last_status);
			ret = -ENOENT;
			goto err;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			struct i40e_aqc_remove_macvlan_element_data element;

			memset(&element, 0, sizeof(element));
			ether_addr_copy(element.mac_addr, f->macaddr);
			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
						     &element, 1, NULL);
			if (ret) {
				/* some older FW has a different default */
				element.flags |=
					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				i40e_aq_remove_macvlan(hw, vsi->seid,
						       &element, 1, NULL);
			}

			i40e_aq_mac_address_write(hw,
						  I40E_AQC_WRITE_TYPE_LAA_WOL,
						  f->macaddr, NULL);
		}
	}
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %d, aq_err %d\n",
			 ret, pf->hw.aq.asq_last_status);
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}

/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet. We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
					 __func__);
				return NULL;
			}
			i40e_enable_pf_switch_lb(pf);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
			    vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, aq_err=%d\n",
			 hw->aq.asq_last_status);
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
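
/* Illustrative sketch (not driver code): tc_bw_max above concatenates two
 * le16 words into one u32 holding eight per-TC max-quanta fields, one per
 * nibble, of which only the low 3 bits are kept. Worked example for
 * tc_bw_max = 0x87654321:
 *
 *   TC0: (0x87654321 >> 0)  & 0x7 = 1
 *   TC1: (0x87654321 >> 4)  & 0x7 = 2
 *   TC7: (0x87654321 >> 28) & 0x7 = 0   (high bit of the nibble dropped)
 *
 * The per-TC extraction, restated standalone (name hypothetical):
 */
static u8 i40e_tc_max_quanta_sketch(u32 tc_bw_max, int tc)
{
	return (tc_bw_max >> (tc * 4)) & 0x7;
}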

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	bool is_default = false;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't add VEB, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&veb->pf->pdev->dev,
			 "couldn't get VEB bw info, err %d, aq_err %d\n",
			 ret, veb->pf->hw.aq.asq_last_status);
		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
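
/* Illustrative sketch (not driver code): the floating-relay guard above
 * rejects exactly the mixed case - one seid zero, the other non-zero - so
 * both-zero (floating VEB) and both-non-zero pass. An equivalent, perhaps
 * more direct, form of the same predicate (name hypothetical):
 */
static bool i40e_seids_mixed_sketch(u16 uplink_seid, u16 vsi_seid)
{
	/* true when exactly one of the two seids is zero */
	return (uplink_seid == 0) != (vsi_seid == 0);
}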

/**
 * i40e_setup_pf_switch_element - set pf vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
8796
8797/**
8798 * i40e_fetch_switch_configuration - Get switch config from firmware
8799 * @pf: board private structure
8800 * @printconfig: should we print the contents
8801 *
8802 * Get the current switch configuration from the device and
8803 * extract a few useful SEID values.
8804 **/
8805int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8806{
8807 struct i40e_aqc_get_switch_config_resp *sw_config;
8808 u16 next_seid = 0;
8809 int ret = 0;
8810 u8 *aq_buf;
8811 int i;
8812
8813 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8814 if (!aq_buf)
8815 return -ENOMEM;
8816
8817 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
8818 do {
8819 u16 num_reported, num_total;
8820
8821 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8822 I40E_AQ_LARGE_BUF,
8823 &next_seid, NULL);
8824 if (ret) {
8825 dev_info(&pf->pdev->dev,
8826 "get switch config failed %d aq_err=%x\n",
8827 ret, pf->hw.aq.asq_last_status);
8828 kfree(aq_buf);
8829 return -ENOENT;
8830 }
8831
8832 num_reported = le16_to_cpu(sw_config->header.num_reported);
8833 num_total = le16_to_cpu(sw_config->header.num_total);
8834
8835 if (printconfig)
8836 dev_info(&pf->pdev->dev,
8837 "header: %d reported %d total\n",
8838 num_reported, num_total);
8839
8840 for (i = 0; i < num_reported; i++) {
8841 struct i40e_aqc_switch_config_element_resp *ele =
8842 &sw_config->element[i];
8843
8844 i40e_setup_pf_switch_element(pf, ele, num_reported,
8845 printconfig);
8846 }
8847 } while (next_seid != 0);
8848
8849 kfree(aq_buf);
8850 return ret;
8851}
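/* [Editorial sketch, not part of the driver] The function above pages
 * through the switch configuration: next_seid starts at 0 to request the
 * first chunk, firmware rewrites it as a continuation cookie, and the
 * loop runs until it comes back 0. The same shape in isolation, with a
 * hypothetical example_get_page() standing in for
 * i40e_aq_get_switch_config():
 */
#if 0
static int example_fetch_all(struct i40e_pf *pf)
{
	u16 next_seid = 0;	/* 0 = start from the beginning */

	do {
		if (example_get_page(pf, &next_seid))	/* hypothetical */
			return -EIO;
		/* ... consume the elements reported in this chunk ... */
	} while (next_seid != 0);	/* non-zero cookie: more to fetch */

	return 0;
}
#endif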
8852
8853/**
8854 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8855 * @pf: board private structure
8856 * @reinit: if the Main VSI needs to be re-initialized.
8857 *
8858 * Returns 0 on success, negative value on failure
8859 **/
8860 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
8861{
8862 int ret;
8863
8864 /* find out what's out there already */
8865 ret = i40e_fetch_switch_configuration(pf, false);
8866 if (ret) {
8867 dev_info(&pf->pdev->dev,
8868 "couldn't fetch switch config, err %d, aq_err %d\n",
8869 ret, pf->hw.aq.asq_last_status);
8870 return ret;
8871 }
8872 i40e_pf_reset_stats(pf);
8873
8874 /* first time setup */
8875 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
8876 struct i40e_vsi *vsi = NULL;
8877 u16 uplink_seid;
8878
8879 /* Set up the PF VSI associated with the PF's main VSI
8880 * that is already in the HW switch
8881 */
8882 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8883 uplink_seid = pf->veb[pf->lan_veb]->seid;
8884 else
8885 uplink_seid = pf->mac_seid;
8886 if (pf->lan_vsi == I40E_NO_VSI)
8887 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8888 else if (reinit)
8889 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
8890 if (!vsi) {
8891 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8892 i40e_fdir_teardown(pf);
8893 return -EAGAIN;
8894 }
8895 } else {
8896 /* force a reset of TC and queue layout configurations */
8897 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8898 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8899 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8900 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8901 }
8902 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8903
8904 i40e_fdir_sb_setup(pf);
8905
8906 /* Setup static PF queue filter control settings */
8907 ret = i40e_setup_pf_filter_control(pf);
8908 if (ret) {
8909 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8910 ret);
8911 /* Failure here should not stop continuing other steps */
8912 }
8913
8914 /* enable RSS in the HW, even for only one queue, as the stack can use
8915 * the hash
8916 */
8917 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8918 i40e_config_rss(pf);
8919
8920 /* fill in link information and enable LSE reporting */
8921 i40e_update_link_info(&pf->hw, true);
8922 i40e_link_event(pf);
8923
8924 /* Initialize user-specific link properties */
8925 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8926 I40E_AQ_AN_COMPLETED) ? true : false);
8927
8936 i40e_ptp_init(pf);
8937
8938 return ret;
8939}
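/* [Editorial note with a sketch] In the "else" branch above, the saved
 * enabled_tc is deliberately cleared before calling i40e_vsi_config_tc()
 * so the reconfiguration cannot be short-circuited by a cached TC map
 * that already matches; the TC and queue layout is rebuilt from scratch.
 * Reduced to its core (the local vsi pointer is hypothetical here):
 */
#if 0
	u8 enabled_tc = vsi->tc_config.enabled_tc;

	vsi->tc_config.enabled_tc = 0;		/* invalidate cached map */
	i40e_vsi_config_tc(vsi, enabled_tc);	/* force full TC rebuild */
#endif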
8940
8941/**
8942 * i40e_determine_queue_usage - Work out queue distribution
8943 * @pf: board private structure
8944 **/
8945static void i40e_determine_queue_usage(struct i40e_pf *pf)
8946{
8947 int queues_left;
8948
8949 pf->num_lan_qps = 0;
8950#ifdef I40E_FCOE
8951 pf->num_fcoe_qps = 0;
8952#endif
8953
8954 /* Find the max queues to be put into basic use. We'll always be
8955 * using TC0, whether or not DCB is running, and TC0 will get the
8956 * big RSS set.
8957 */
8958 queues_left = pf->hw.func_caps.num_tx_qp;
8959
8960 if ((queues_left == 1) ||
8961 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8962 /* one qp for PF, no queues for anything else */
8963 queues_left = 0;
8964 pf->rss_size = pf->num_lan_qps = 1;
8965
8966 /* make sure all the fancies are disabled */
8967 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8968#ifdef I40E_FCOE
8969 I40E_FLAG_FCOE_ENABLED |
8970#endif
8971 I40E_FLAG_FD_SB_ENABLED |
8972 I40E_FLAG_FD_ATR_ENABLED |
8973 I40E_FLAG_DCB_CAPABLE |
8974 I40E_FLAG_SRIOV_ENABLED |
8975 I40E_FLAG_VMDQ_ENABLED);
8976 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8977 I40E_FLAG_FD_SB_ENABLED |
8978 I40E_FLAG_FD_ATR_ENABLED |
8979 I40E_FLAG_DCB_CAPABLE))) {
8980 /* one qp for PF */
8981 pf->rss_size = pf->num_lan_qps = 1;
8982 queues_left -= pf->num_lan_qps;
8983
8984 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
8985#ifdef I40E_FCOE
8986 I40E_FLAG_FCOE_ENABLED |
8987#endif
8988 I40E_FLAG_FD_SB_ENABLED |
8989 I40E_FLAG_FD_ATR_ENABLED |
8990 I40E_FLAG_DCB_ENABLED |
8991 I40E_FLAG_VMDQ_ENABLED);
8992 } else {
8993 /* Not enough queues for all TCs */
8994 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
8995 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
8996 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
8997 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
8998 }
8999 pf->num_lan_qps = pf->rss_size_max;
9000 queues_left -= pf->num_lan_qps;
9001 }
9002
9003#ifdef I40E_FCOE
9004 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9005 if (I40E_DEFAULT_FCOE <= queues_left) {
9006 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9007 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9008 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9009 } else {
9010 pf->num_fcoe_qps = 0;
9011 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9012 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9013 }
9014
9015 queues_left -= pf->num_fcoe_qps;
9016 }
9017
9018#endif
9019 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9020 if (queues_left > 1) {
9021 queues_left -= 1; /* save 1 queue for FD */
9022 } else {
9023 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9024 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9025 }
9026 }
9027
9028 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9029 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9030 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9031 (queues_left / pf->num_vf_qps));
9032 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9033 }
9034
9035 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9036 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9037 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9038 (queues_left / pf->num_vmdq_qps));
9039 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9040 }
9041
9042 pf->queues_left = queues_left;
9043#ifdef I40E_FCOE
9044 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9045#endif
9046}
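/* [Editorial sketch, not part of the driver] The budgeting above starts
 * from func_caps.num_tx_qp and carves features off in priority order:
 * LAN/RSS first, then FCoE, one Flow Director queue, VFs, and VMDq,
 * clamping each consumer to what is left. The arithmetic in miniature,
 * with made-up numbers:
 */
#if 0
static int example_queue_budget(void)
{
	int queues_left = 64;		/* func_caps.num_tx_qp */
	int lan_qps = 16;		/* big RSS set on TC0 */
	int num_vfs = 8, vf_qps = 4;

	queues_left -= lan_qps;
	queues_left -= 1;		/* one queue reserved for FD */
	if (num_vfs * vf_qps > queues_left)
		num_vfs = queues_left / vf_qps;	/* the driver's min_t() */
	queues_left -= num_vfs * vf_qps;

	return queues_left;		/* becomes pf->queues_left */
}
#endif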
9047
9048/**
9049 * i40e_setup_pf_filter_control - Setup PF static filter control
9050 * @pf: PF to be setup
9051 *
9052 * i40e_setup_pf_filter_control sets up a pf's initial filter control
9053 * settings. If PE/FCoE are enabled then it will also set the per-PF
9054 * filter sizes required for them. It also enables Flow Director,
9055 * ethertype and macvlan type filter settings for the pf.
9056 *
9057 * Returns 0 on success, negative on failure
9058 **/
9059static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9060{
9061 struct i40e_filter_control_settings *settings = &pf->filter_settings;
9062
9063 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9064
9065 /* Flow Director is enabled */
9066 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9067 settings->enable_fdir = true;
9068
9069 /* Ethtype and MACVLAN filters enabled for PF */
9070 settings->enable_ethtype = true;
9071 settings->enable_macvlan = true;
9072
9073 if (i40e_set_filter_control(&pf->hw, settings))
9074 return -ENOENT;
9075
9076 return 0;
9077}
9078
9079#define INFO_STRING_LEN 255
9080static void i40e_print_features(struct i40e_pf *pf)
9081{
9082 struct i40e_hw *hw = &pf->hw;
9083 char *buf, *string;
9084
9085 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9086 if (!string) {
9087 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9088 return;
9089 }
9090
9091 buf = string;
9092
9093 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9094#ifdef CONFIG_PCI_IOV
9095 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9096#endif
9097 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
9098 pf->vsi[pf->lan_vsi]->num_queue_pairs);
9099
9100 if (pf->flags & I40E_FLAG_RSS_ENABLED)
9101 buf += sprintf(buf, "RSS ");
9102 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9103 buf += sprintf(buf, "FD_ATR ");
9104 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9105 buf += sprintf(buf, "FD_SB ");
9106 buf += sprintf(buf, "NTUPLE ");
9107 }
9108 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9109 buf += sprintf(buf, "DCB ");
9110 if (pf->flags & I40E_FLAG_PTP)
9111 buf += sprintf(buf, "PTP ");
9112#ifdef I40E_FCOE
9113 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9114 buf += sprintf(buf, "FCOE ");
9115#endif
9116
9117 BUG_ON(buf > (string + INFO_STRING_LEN));
9118 dev_info(&pf->pdev->dev, "%s\n", string);
9119 kfree(string);
9120}
9121
9122/**
9123 * i40e_probe - Device initialization routine
9124 * @pdev: PCI device information struct
9125 * @ent: entry in i40e_pci_tbl
9126 *
9127 * i40e_probe initializes a pf identified by a pci_dev structure.
9128 * The OS initialization, configuring of the pf private structure,
9129 * and a hardware reset occur.
9130 *
9131 * Returns 0 on success, negative on failure
9132 **/
9133static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9134{
9135 struct i40e_pf *pf;
9136 struct i40e_hw *hw;
9137 static u16 pfs_found;
9138 u16 link_status;
9139 int err = 0;
9140 u32 len;
9141 u32 i;
9142
9143 err = pci_enable_device_mem(pdev);
9144 if (err)
9145 return err;
9146
9147 /* set up for high or low dma */
9148 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9149 if (err) {
9150 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9151 if (err) {
9152 dev_err(&pdev->dev,
9153 "DMA configuration failed: 0x%x\n", err);
9154 goto err_dma;
9155 }
9156 }
9157
9158 /* set up pci connections */
9159 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9160 IORESOURCE_MEM), i40e_driver_name);
9161 if (err) {
9162 dev_info(&pdev->dev,
9163 "pci_request_selected_regions failed %d\n", err);
9164 goto err_pci_reg;
9165 }
9166
9167 pci_enable_pcie_error_reporting(pdev);
9168 pci_set_master(pdev);
9169
9170 /* Now that we have a PCI connection, we need to do the
9171 * low level device setup. This is primarily setting up
9172 * the Admin Queue structures and then querying for the
9173 * device's current profile information.
9174 */
9175 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9176 if (!pf) {
9177 err = -ENOMEM;
9178 goto err_pf_alloc;
9179 }
9180 pf->next_vsi = 0;
9181 pf->pdev = pdev;
9182 set_bit(__I40E_DOWN, &pf->state);
9183
9184 hw = &pf->hw;
9185 hw->back = pf;
9186 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
9187 pci_resource_len(pdev, 0));
9188 if (!hw->hw_addr) {
9189 err = -EIO;
9190 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9191 (unsigned int)pci_resource_start(pdev, 0),
9192 (unsigned int)pci_resource_len(pdev, 0), err);
9193 goto err_ioremap;
9194 }
9195 hw->vendor_id = pdev->vendor;
9196 hw->device_id = pdev->device;
9197 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9198 hw->subsystem_vendor_id = pdev->subsystem_vendor;
9199 hw->subsystem_device_id = pdev->subsystem_device;
9200 hw->bus.device = PCI_SLOT(pdev->devfn);
9201 hw->bus.func = PCI_FUNC(pdev->devfn);
9202 pf->instance = pfs_found;
9203
9204 if (debug != -1)
9205 pf->msg_enable = debug;
9208
9209 /* do a special CORER for clearing PXE mode once at init */
9210 if (hw->revision_id == 0 &&
9211 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9212 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9213 i40e_flush(hw);
9214 msleep(200);
9215 pf->corer_count++;
9216
9217 i40e_clear_pxe_mode(hw);
9218 }
9219
9220 /* Reset here to make sure all is clean and to define PF 'n' */
9221 i40e_clear_hw(hw);
9222 err = i40e_pf_reset(hw);
9223 if (err) {
9224 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9225 goto err_pf_reset;
9226 }
9227 pf->pfr_count++;
9228
9229 hw->aq.num_arq_entries = I40E_AQ_LEN;
9230 hw->aq.num_asq_entries = I40E_AQ_LEN;
9231 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9232 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9233 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9234
9235 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9236 "%s-%s:misc",
9237 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9238
9239 err = i40e_init_shared_code(hw);
9240 if (err) {
9241 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9242 goto err_pf_reset;
9243 }
9244
9245 /* set up a default setting for link flow control */
9246 pf->hw.fc.requested_mode = I40E_FC_NONE;
9247
9248 err = i40e_init_adminq(hw);
9249 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9250 if (err) {
9251 dev_info(&pdev->dev,
9252 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9253 goto err_pf_reset;
9254 }
9255
9256 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9257 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9258 dev_info(&pdev->dev,
9259 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9260 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9261 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9262 dev_info(&pdev->dev,
9263 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9264
9265
9266 i40e_verify_eeprom(pf);
9267
9268 /* Rev 0 hardware was never productized */
9269 if (hw->revision_id < 1)
9270 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9271
9272 i40e_clear_pxe_mode(hw);
9273 err = i40e_get_capabilities(pf);
9274 if (err)
9275 goto err_adminq_setup;
9276
9277 err = i40e_sw_init(pf);
9278 if (err) {
9279 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9280 goto err_sw_init;
9281 }
9282
9283 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9284 hw->func_caps.num_rx_qp,
9285 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9286 if (err) {
9287 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9288 goto err_init_lan_hmc;
9289 }
9290
9291 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9292 if (err) {
9293 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9294 err = -ENOENT;
9295 goto err_configure_lan_hmc;
9296 }
9297
9298 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9299 * Ignore error return codes because if it was already disabled via
9300 * hardware settings this will fail
9301 */
9302 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9303 (pf->hw.aq.fw_maj_ver < 4)) {
9304 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9305 i40e_aq_stop_lldp(hw, true, NULL);
9306 }
9307
9308 i40e_get_mac_addr(hw, hw->mac.addr);
9309 if (!is_valid_ether_addr(hw->mac.addr)) {
9310 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9311 err = -EIO;
9312 goto err_mac_addr;
9313 }
9314 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9315 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
9316 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9317 if (is_valid_ether_addr(hw->mac.port_addr))
9318 pf->flags |= I40E_FLAG_PORT_ID_VALID;
9319#ifdef I40E_FCOE
9320 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9321 if (err)
9322 dev_info(&pdev->dev,
9323 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9324 if (!is_valid_ether_addr(hw->mac.san_addr)) {
9325 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9326 hw->mac.san_addr);
9327 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9328 }
9329 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9330#endif /* I40E_FCOE */
9331
9332 pci_set_drvdata(pdev, pf);
9333 pci_save_state(pdev);
9334#ifdef CONFIG_I40E_DCB
9335 err = i40e_init_pf_dcb(pf);
9336 if (err) {
9337 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9338 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9339 /* Continue without DCB enabled */
9340 }
9341#endif /* CONFIG_I40E_DCB */
9342
9343 /* set up periodic task facility */
9344 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9345 pf->service_timer_period = HZ;
9346
9347 INIT_WORK(&pf->service_task, i40e_service_task);
9348 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9349 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9350 pf->link_check_timeout = jiffies;
9351
9352 /* WoL defaults to disabled */
9353 pf->wol_en = false;
9354 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9355
9356 /* set up the main switch operations */
9357 i40e_determine_queue_usage(pf);
9358 i40e_init_interrupt_scheme(pf);
9359
9360 /* The number of VSIs reported by the FW is the minimum guaranteed
9361 * to us; HW supports far more and we share the remaining pool with
9362 * the other PFs. We allocate space for more than the guarantee with
9363 * the understanding that we might not get them all later.
9364 */
9365 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9366 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9367 else
9368 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9369
9370 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9371 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
9372 pf->vsi = kzalloc(len, GFP_KERNEL);
9373 if (!pf->vsi) {
9374 err = -ENOMEM;
9375 goto err_switch_setup;
9376 }
9377
9378 err = i40e_setup_pf_switch(pf, false);
9379 if (err) {
9380 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9381 goto err_vsis;
9382 }
9383 /* if FDIR VSI was set up, start it now */
9384 for (i = 0; i < pf->num_alloc_vsi; i++) {
9385 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9386 i40e_vsi_open(pf->vsi[i]);
9387 break;
9388 }
9389 }
9390
9391 /* driver is only interested in link up/down and module qualification
9392 * reports from firmware
9393 */
9394 err = i40e_aq_set_phy_int_mask(&pf->hw,
9395 I40E_AQ_EVENT_LINK_UPDOWN |
9396 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9397 if (err)
9398 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9399
9400 msleep(75);
9401 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9402 if (err) {
9403 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
9404 pf->hw.aq.asq_last_status);
9405 }
9406
9407 /* The main driver is (mostly) up and happy. We need to set this state
9408 * before setting up the misc vector or we get a race and the vector
9409 * ends up disabled forever.
9410 */
9411 clear_bit(__I40E_DOWN, &pf->state);
9412
9413 /* In case of MSIX we are going to setup the misc vector right here
9414 * to handle admin queue events etc. In case of legacy and MSI
9415 * the misc functionality and queue processing is combined in
9416 * the same vector and that gets setup at open.
9417 */
9418 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9419 err = i40e_setup_misc_vector(pf);
9420 if (err) {
9421 dev_info(&pdev->dev,
9422 "setup of misc vector failed: %d\n", err);
9423 goto err_vsis;
9424 }
9425 }
9426
9427#ifdef CONFIG_PCI_IOV
9428 /* prep for VF support */
9429 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9430 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9431 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9432 u32 val;
9433
9434 /* disable link interrupts for VFs */
9435 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9436 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9437 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9438 i40e_flush(hw);
9439
9440 if (pci_num_vf(pdev)) {
9441 dev_info(&pdev->dev,
9442 "Active VFs found, allocating resources.\n");
9443 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9444 if (err)
9445 dev_info(&pdev->dev,
9446 "Error %d allocating resources for existing VFs\n",
9447 err);
9448 }
9449 }
9450#endif /* CONFIG_PCI_IOV */
9451
9452 pfs_found++;
9453
9454 i40e_dbg_pf_init(pf);
9455
9456 /* tell the firmware that we're starting */
9457 i40e_send_version(pf);
9458
9459 /* since everything's happy, start the service_task timer */
9460 mod_timer(&pf->service_timer,
9461 round_jiffies(jiffies + pf->service_timer_period));
9462
9463#ifdef I40E_FCOE
9464 /* create FCoE interface */
9465 i40e_fcoe_vsi_setup(pf);
9466
9467#endif
9468 /* Get the negotiated link width and speed from PCI config space */
9469 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9470
9471 i40e_set_pci_config_data(hw, link_status);
9472
9473 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
9474 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9475 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9476 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9477 "Unknown"),
9478 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9479 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9480 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9481 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9482 "Unknown"));
9483
9484 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9485 hw->bus.speed < i40e_bus_speed_8000) {
9486 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9487 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9488 }
9489
9490 /* print a string summarizing features */
9491 i40e_print_features(pf);
9492
9493 return 0;
9494
9495 /* Unwind what we've done if something failed in the setup */
9496err_vsis:
9497 set_bit(__I40E_DOWN, &pf->state);
9498 i40e_clear_interrupt_scheme(pf);
9499 kfree(pf->vsi);
9500err_switch_setup:
9501 i40e_reset_interrupt_capability(pf);
9502 del_timer_sync(&pf->service_timer);
9503err_mac_addr:
9504err_configure_lan_hmc:
9505 (void)i40e_shutdown_lan_hmc(hw);
9506err_init_lan_hmc:
9507 kfree(pf->qp_pile);
9508 kfree(pf->irq_pile);
9509err_sw_init:
9510err_adminq_setup:
9511 (void)i40e_shutdown_adminq(hw);
9512err_pf_reset:
9513 iounmap(hw->hw_addr);
9514err_ioremap:
9515 kfree(pf);
9516err_pf_alloc:
9517 pci_disable_pcie_error_reporting(pdev);
9518 pci_release_selected_regions(pdev,
9519 pci_select_bars(pdev, IORESOURCE_MEM));
9520err_pci_reg:
9521err_dma:
9522 pci_disable_device(pdev);
9523 return err;
9524}
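/* [Editorial sketch, not part of the driver] i40e_probe() unwinds
 * failures with the kernel's goto-ladder idiom: each err_* label undoes
 * only the steps that had already succeeded, in reverse order. The
 * pattern in miniature:
 */
#if 0
static int example_probe(void)
{
	void *a, *b;
	int err;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(16, GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto err_b;		/* only 'a' exists yet */
	}

	return 0;

err_b:
	kfree(a);
	return err;
}
#endif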
9525
9526/**
9527 * i40e_remove - Device removal routine
9528 * @pdev: PCI device information struct
9529 *
9530 * i40e_remove is called by the PCI subsystem to alert the driver
9531 * that it should release a PCI device. This could be caused by a
9532 * Hot-Plug event, or because the driver is going to be removed from
9533 * memory.
9534 **/
9535static void i40e_remove(struct pci_dev *pdev)
9536{
9537 struct i40e_pf *pf = pci_get_drvdata(pdev);
9538 i40e_status ret_code;
9539 int i;
9540
9541 i40e_dbg_pf_exit(pf);
9542
9543 i40e_ptp_stop(pf);
9544
9545 /* no more scheduling of any task */
9546 set_bit(__I40E_DOWN, &pf->state);
9547 del_timer_sync(&pf->service_timer);
9548 cancel_work_sync(&pf->service_task);
9549
9550 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9551 i40e_free_vfs(pf);
9552 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
9553 }
9554
9555 i40e_fdir_teardown(pf);
9556
9557 /* If there is a switch structure or any orphans, remove them.
9558 * This will leave only the PF's VSI remaining.
9559 */
9560 for (i = 0; i < I40E_MAX_VEB; i++) {
9561 if (!pf->veb[i])
9562 continue;
9563
9564 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
9565 pf->veb[i]->uplink_seid == 0)
9566 i40e_switch_branch_release(pf->veb[i]);
9567 }
9568
9569 /* Now we can shutdown the PF's VSI, just before we kill
9570 * adminq and hmc.
9571 */
9572 if (pf->vsi[pf->lan_vsi])
9573 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9574
9575 i40e_stop_misc_vector(pf);
9576 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9577 synchronize_irq(pf->msix_entries[0].vector);
9578 free_irq(pf->msix_entries[0].vector, pf);
9579 }
9580
9581 /* shutdown and destroy the HMC */
9582 if (pf->hw.hmc.hmc_obj) {
9583 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
9584 if (ret_code)
9585 dev_warn(&pdev->dev,
9586 "Failed to destroy the HMC resources: %d\n",
9587 ret_code);
9588 }
9589
9590 /* shutdown the adminq */
9591 ret_code = i40e_shutdown_adminq(&pf->hw);
9592 if (ret_code)
9593 dev_warn(&pdev->dev,
9594 "Failed to destroy the Admin Queue resources: %d\n",
9595 ret_code);
9596
9597 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
9598 i40e_clear_interrupt_scheme(pf);
9599 for (i = 0; i < pf->num_alloc_vsi; i++) {
9600 if (pf->vsi[i]) {
9601 i40e_vsi_clear_rings(pf->vsi[i]);
9602 i40e_vsi_clear(pf->vsi[i]);
9603 pf->vsi[i] = NULL;
9604 }
9605 }
9606
9607 for (i = 0; i < I40E_MAX_VEB; i++) {
9608 kfree(pf->veb[i]);
9609 pf->veb[i] = NULL;
9610 }
9611
9612 kfree(pf->qp_pile);
9613 kfree(pf->irq_pile);
9614 kfree(pf->vsi);
9615
9616 iounmap(pf->hw.hw_addr);
9617 kfree(pf);
9618 pci_release_selected_regions(pdev,
9619 pci_select_bars(pdev, IORESOURCE_MEM));
9620
9621 pci_disable_pcie_error_reporting(pdev);
9622 pci_disable_device(pdev);
9623}
9624
9625/**
9626 * i40e_pci_error_detected - warning that something funky happened in PCI land
9627 * @pdev: PCI device information struct
9628 *
9629 * Called to warn that something happened and the error handling steps
9630 * are in progress. Allows the driver to quiesce things, be ready for
9631 * remediation.
9632 **/
9633static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
9634 enum pci_channel_state error)
9635{
9636 struct i40e_pf *pf = pci_get_drvdata(pdev);
9637
9638 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
9639
9640 /* shutdown all operations */
9641 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
9642 rtnl_lock();
9643 i40e_prep_for_reset(pf);
9644 rtnl_unlock();
9645 }
9646
9647 /* Request a slot reset */
9648 return PCI_ERS_RESULT_NEED_RESET;
9649}
9650
9651/**
9652 * i40e_pci_error_slot_reset - a PCI slot reset just happened
9653 * @pdev: PCI device information struct
9654 *
9655 * Called to find if the driver can work with the device now that
9656 * the pci slot has been reset. If a basic connection seems good
9657 * (registers are readable and have sane content) then return a
9658 * happy little PCI_ERS_RESULT_xxx.
9659 **/
9660static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
9661{
9662 struct i40e_pf *pf = pci_get_drvdata(pdev);
9663 pci_ers_result_t result;
9664 int err;
9665 u32 reg;
9666
9667 dev_info(&pdev->dev, "%s\n", __func__);
9668 if (pci_enable_device_mem(pdev)) {
9669 dev_info(&pdev->dev,
9670 "Cannot re-enable PCI device after reset.\n");
9671 result = PCI_ERS_RESULT_DISCONNECT;
9672 } else {
9673 pci_set_master(pdev);
9674 pci_restore_state(pdev);
9675 pci_save_state(pdev);
9676 pci_wake_from_d3(pdev, false);
9677
9678 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9679 if (reg == 0)
9680 result = PCI_ERS_RESULT_RECOVERED;
9681 else
9682 result = PCI_ERS_RESULT_DISCONNECT;
9683 }
9684
9685 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9686 if (err) {
9687 dev_info(&pdev->dev,
9688 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9689 err);
9690 /* non-fatal, continue */
9691 }
9692
9693 return result;
9694}
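/* [Editorial note with a sketch] The health check above treats one
 * readable register with expected content as evidence that the slot
 * reset worked: I40E_GLGEN_RTRIG reading back 0 is taken to mean no
 * reset is still pending. Its core, in isolation:
 */
#if 0
	u32 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);

	return (reg == 0) ? PCI_ERS_RESULT_RECOVERED
			  : PCI_ERS_RESULT_DISCONNECT;
#endif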
9695
9696/**
9697 * i40e_pci_error_resume - restart operations after PCI error recovery
9698 * @pdev: PCI device information struct
9699 *
9700 * Called to allow the driver to bring things back up after PCI error
9701 * and/or reset recovery has finished.
9702 **/
9703static void i40e_pci_error_resume(struct pci_dev *pdev)
9704{
9705 struct i40e_pf *pf = pci_get_drvdata(pdev);
9706
9707 dev_info(&pdev->dev, "%s\n", __func__);
9708 if (test_bit(__I40E_SUSPENDED, &pf->state))
9709 return;
9710
9711 rtnl_lock();
9712 i40e_handle_reset_warning(pf);
9713 rtnl_unlock();
9714}
9715
9716/**
9717 * i40e_shutdown - PCI callback for shutting down
9718 * @pdev: PCI device information struct
9719 **/
9720static void i40e_shutdown(struct pci_dev *pdev)
9721{
9722 struct i40e_pf *pf = pci_get_drvdata(pdev);
9723 struct i40e_hw *hw = &pf->hw;
9724
9725 set_bit(__I40E_SUSPENDED, &pf->state);
9726 set_bit(__I40E_DOWN, &pf->state);
9727 rtnl_lock();
9728 i40e_prep_for_reset(pf);
9729 rtnl_unlock();
9730
9731 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9732 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9733
9734 if (system_state == SYSTEM_POWER_OFF) {
9735 pci_wake_from_d3(pdev, pf->wol_en);
9736 pci_set_power_state(pdev, PCI_D3hot);
9737 }
9738}
9739
9740#ifdef CONFIG_PM
9741/**
9742 * i40e_suspend - PCI callback for moving to D3
9743 * @pdev: PCI device information struct
9744 **/
9745static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9746{
9747 struct i40e_pf *pf = pci_get_drvdata(pdev);
9748 struct i40e_hw *hw = &pf->hw;
9749
9750 set_bit(__I40E_SUSPENDED, &pf->state);
9751 set_bit(__I40E_DOWN, &pf->state);
9752 del_timer_sync(&pf->service_timer);
9753 cancel_work_sync(&pf->service_task);
9754 rtnl_lock();
9755 i40e_prep_for_reset(pf);
9756 rtnl_unlock();
9757
9758 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9759 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9760
9761 pci_wake_from_d3(pdev, pf->wol_en);
9762 pci_set_power_state(pdev, PCI_D3hot);
9763
9764 return 0;
9765}
9766
9767/**
9768 * i40e_resume - PCI callback for waking up from D3
9769 * @pdev: PCI device information struct
9770 **/
9771static int i40e_resume(struct pci_dev *pdev)
9772{
9773 struct i40e_pf *pf = pci_get_drvdata(pdev);
9774 u32 err;
9775
9776 pci_set_power_state(pdev, PCI_D0);
9777 pci_restore_state(pdev);
9778 /* pci_restore_state() clears dev->state_saved, so call
9779 * pci_save_state() again to restore it.
9780 */
9781 pci_save_state(pdev);
9782
9783 err = pci_enable_device_mem(pdev);
9784 if (err) {
9785 dev_err(&pdev->dev,
9786 "%s: Cannot enable PCI device from suspend\n",
9787 __func__);
9788 return err;
9789 }
9790 pci_set_master(pdev);
9791
9792 /* no wakeup events while running */
9793 pci_wake_from_d3(pdev, false);
9794
9795 /* handling the reset will rebuild the device state */
9796 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
9797 clear_bit(__I40E_DOWN, &pf->state);
9798 rtnl_lock();
9799 i40e_reset_and_rebuild(pf, false);
9800 rtnl_unlock();
9801 }
9802
9803 return 0;
9804}
9805
9806#endif
9807static const struct pci_error_handlers i40e_err_handler = {
9808 .error_detected = i40e_pci_error_detected,
9809 .slot_reset = i40e_pci_error_slot_reset,
9810 .resume = i40e_pci_error_resume,
9811};
9812
9813static struct pci_driver i40e_driver = {
9814 .name = i40e_driver_name,
9815 .id_table = i40e_pci_tbl,
9816 .probe = i40e_probe,
9817 .remove = i40e_remove,
9818#ifdef CONFIG_PM
9819 .suspend = i40e_suspend,
9820 .resume = i40e_resume,
9821#endif
9822 .shutdown = i40e_shutdown,
9823 .err_handler = &i40e_err_handler,
9824 .sriov_configure = i40e_pci_sriov_configure,
9825};
9826
9827/**
9828 * i40e_init_module - Driver registration routine
9829 *
9830 * i40e_init_module is the first routine called when the driver is
9831 * loaded. All it does is register with the PCI subsystem.
9832 **/
9833static int __init i40e_init_module(void)
9834{
9835 pr_info("%s: %s - version %s\n", i40e_driver_name,
9836 i40e_driver_string, i40e_driver_version_str);
9837 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
9838 i40e_dbg_init();
9839 return pci_register_driver(&i40e_driver);
9840}
9841module_init(i40e_init_module);
9842
9843/**
9844 * i40e_exit_module - Driver exit cleanup routine
9845 *
9846 * i40e_exit_module is called just before the driver is removed
9847 * from memory.
9848 **/
9849static void __exit i40e_exit_module(void)
9850{
9851 pci_unregister_driver(&i40e_driver);
9852 i40e_dbg_exit();
9853}
9854module_exit(i40e_exit_module);