be2net: add support for spoofchk setting
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
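/* Descriptive note: the two tables below are indexed by bit position when
 * decoding the UE (Unrecoverable Error) status CSRs for error logging.
 */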
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

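/* Allocate/free the DMA-coherent memory backing a HW queue ring:
 * be_queue_alloc() zeroes the queue-info struct and allocates
 * len * entry_size bytes; be_queue_free() releases the ring memory and
 * clears the VA pointer so a repeated free is a no-op.
 */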
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

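/* Doorbell helpers: each notify routine below composes a 32-bit doorbell
 * value (ring id plus the count of entries posted/popped) and writes it to
 * the BAR-mapped doorbell offset. The wmb() in the RQ/TXULP paths orders
 * the ring-memory updates ahead of the doorbell write.
 */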
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

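/* ndo_set_mac_address() handler. The new MAC is first programmed into the
 * filter table (PMAC_ADD) and then confirmed by reading back the active MAC
 * from the FW, since a VF without the FILTMGMT privilege relies on the PF
 * to provision its address.
 */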
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

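/* The populate_be_vX_stats() routines below byte-swap the versioned
 * GET_STATS response and copy the fields the driver reports into the
 * common adapter->drv_stats layout.
 */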
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

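/* Fold a 16-bit HW counter into a 32-bit accumulator: the low half mirrors
 * the HW value and the high half counts 64K wrap-arounds, detected when the
 * new reading is smaller than the last accumulated low half.
 */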
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

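/* ndo_get_stats64() handler: per-queue SW counters are read under the
 * u64_stats fetch/retry loop so 64-bit values stay consistent on 32-bit
 * hosts, then the FW-derived error counters from drv_stats are added.
 */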
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

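/* Pick the VLAN tag to place in the Tx WRB; if the skb's priority is not in
 * the bitmap of priorities available to this function, fall back to the
 * FW-recommended priority.
 */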
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

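/* Translate skb offload state (GSO, checksum, VLAN tag) into the feature
 * bits and values of be_wrb_params, which wrb_fill_hdr() later encodes
 * into the Tx WRB header.
 */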
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

SP
955static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
956{
957 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
958}
959
93040ae5 960static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405 961 struct sk_buff *skb,
804abcdb
SB
962 struct be_wrb_params
963 *wrb_params)
93040ae5
SK
964{
965 u16 vlan_tag = 0;
966
967 skb = skb_share_check(skb, GFP_ATOMIC);
968 if (unlikely(!skb))
969 return skb;
970
df8a39de 971 if (skb_vlan_tag_present(skb))
93040ae5 972 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
973
974 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
975 if (!vlan_tag)
976 vlan_tag = adapter->pvid;
977 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
978 * skip VLAN insertion
979 */
804abcdb 980 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
52fe29e4 981 }
bc0c3405
AK
982
983 if (vlan_tag) {
62749e2c
JP
984 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
985 vlan_tag);
bc0c3405
AK
986 if (unlikely(!skb))
987 return skb;
bc0c3405
AK
988 skb->vlan_tci = 0;
989 }
990
991 /* Insert the outer VLAN, if any */
992 if (adapter->qnq_vid) {
993 vlan_tag = adapter->qnq_vid;
62749e2c
JP
994 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
995 vlan_tag);
bc0c3405
AK
996 if (unlikely(!skb))
997 return skb;
804abcdb 998 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
bc0c3405
AK
999 }
1000
93040ae5
SK
1001 return skb;
1002}
1003
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

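/* Ring the Tx doorbell for all pending WRBs of this queue. Batching is
 * driven by skb->xmit_more: be_xmit() defers the flush until the stack
 * signals the end of a burst or the queue fills up.
 */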
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

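/* RX-filter management: the helpers below move the interface in and out of
 * promiscuous / VLAN-promiscuous / multicast-promiscuous modes via
 * RX_FILTER commands and mirror the resulting state in if_flags.
 */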
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

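/* SR-IOV ndo callbacks: the routines below let the PF administer a VF's
 * MAC address, transparent VLAN tag, Tx rate limit, logical link state
 * and MAC spoof check, caching the settings in adapter->vf_cfg[].
 */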
ba343c77
SB
1412static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1413{
1414 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1415 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1416 int status;
1417
11ac75ed 1418 if (!sriov_enabled(adapter))
ba343c77
SB
1419 return -EPERM;
1420
11ac75ed 1421 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1422 return -EINVAL;
1423
3c31aaf3
VV
1424 /* Proceed further only if user provided MAC is different
1425 * from active MAC
1426 */
1427 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1428 return 0;
1429
3175d8c2
SP
1430 if (BEx_chip(adapter)) {
1431 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1432 vf + 1);
ba343c77 1433
11ac75ed
SP
1434 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1435 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1436 } else {
1437 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1438 vf + 1);
590c391d
PR
1439 }
1440
abccf23e
KA
1441 if (status) {
1442 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1443 mac, vf, status);
1444 return be_cmd_status(status);
1445 }
64600ea5 1446
abccf23e
KA
1447 ether_addr_copy(vf_cfg->mac_addr, mac);
1448
1449 return 0;
ba343c77
SB
1450}
1451
64600ea5 1452static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1453 struct ifla_vf_info *vi)
64600ea5
AK
1454{
1455 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1456 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1457
11ac75ed 1458 if (!sriov_enabled(adapter))
64600ea5
AK
1459 return -EPERM;
1460
11ac75ed 1461 if (vf >= adapter->num_vfs)
64600ea5
AK
1462 return -EINVAL;
1463
1464 vi->vf = vf;
ed616689
SC
1465 vi->max_tx_rate = vf_cfg->tx_rate;
1466 vi->min_tx_rate = 0;
a60b3a13
AK
1467 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1468 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1469 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1470 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1471 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1472
1473 return 0;
1474}
1475
435452aa
VV
1476static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1477{
1478 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1479 u16 vids[BE_NUM_VLANS_SUPPORTED];
1480 int vf_if_id = vf_cfg->if_handle;
1481 int status;
1482
1483 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1484 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1485 if (status)
1486 return status;
1487
1488 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1489 vids[0] = 0;
1490 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1491 if (!status)
1492 dev_info(&adapter->pdev->dev,
1493 "Cleared guest VLANs on VF%d", vf);
1494
1495 /* After TVT is enabled, disallow VFs to program VLAN filters */
1496 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1497 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1498 ~BE_PRIV_FILTMGMT, vf + 1);
1499 if (!status)
1500 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1501 }
1502 return 0;
1503}
1504
1505static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1506{
1507 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1508 struct device *dev = &adapter->pdev->dev;
1509 int status;
1510
1511 /* Reset Transparent VLAN Tagging. */
1512 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1513 vf_cfg->if_handle, 0, 0);
435452aa
VV
1514 if (status)
1515 return status;
1516
1517 /* Allow VFs to program VLAN filtering */
1518 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1519 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1520 BE_PRIV_FILTMGMT, vf + 1);
1521 if (!status) {
1522 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1523 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1524 }
1525 }
1526
1527 dev_info(dev,
1528 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1529 return 0;
1530}
1531
748b539a 1532static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1533{
1534 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1535 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1536 int status;
1da87b7f 1537
11ac75ed 1538 if (!sriov_enabled(adapter))
1da87b7f
AK
1539 return -EPERM;
1540
b9fc0e53 1541 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1542 return -EINVAL;
1543
b9fc0e53
AK
1544 if (vlan || qos) {
1545 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1546 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1547 } else {
435452aa 1548 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1549 }
1550
abccf23e
KA
1551 if (status) {
1552 dev_err(&adapter->pdev->dev,
435452aa
VV
1553 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1554 status);
abccf23e
KA
1555 return be_cmd_status(status);
1556 }
1557
1558 vf_cfg->vlan_tag = vlan;
abccf23e 1559 return 0;
1da87b7f
AK
1560}
1561
ed616689
SC
1562static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1563 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1564{
1565 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1566 struct device *dev = &adapter->pdev->dev;
1567 int percent_rate, status = 0;
1568 u16 link_speed = 0;
1569 u8 link_status;
e1d18735 1570
11ac75ed 1571 if (!sriov_enabled(adapter))
e1d18735
AK
1572 return -EPERM;
1573
94f434c2 1574 if (vf >= adapter->num_vfs)
e1d18735
AK
1575 return -EINVAL;
1576
ed616689
SC
1577 if (min_tx_rate)
1578 return -EINVAL;
1579
0f77ba73
RN
1580 if (!max_tx_rate)
1581 goto config_qos;
1582
1583 status = be_cmd_link_status_query(adapter, &link_speed,
1584 &link_status, 0);
1585 if (status)
1586 goto err;
1587
1588 if (!link_status) {
1589 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1590 status = -ENETDOWN;
0f77ba73
RN
1591 goto err;
1592 }
1593
1594 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1595 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1596 link_speed);
1597 status = -EINVAL;
1598 goto err;
1599 }
1600
 1601 /* On Skyhawk the QoS setting must be done only as a % value */
1602 percent_rate = link_speed / 100;
1603 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1604 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1605 percent_rate);
1606 status = -EINVAL;
1607 goto err;
94f434c2 1608 }
e1d18735 1609
0f77ba73
RN
1610config_qos:
1611 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1612 if (status)
0f77ba73
RN
1613 goto err;
1614
1615 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1616 return 0;
1617
1618err:
1619 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1620 max_tx_rate, vf);
abccf23e 1621 return be_cmd_status(status);
e1d18735 1622}
e2fb1afa 1623
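/* ndo_set_vf_link_state handler: forwards the requested logical link
 * state (auto/enable/disable) to the FW and caches it in vf_cfg.
 */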
bdce2ad7
SR
1624static int be_set_vf_link_state(struct net_device *netdev, int vf,
1625 int link_state)
1626{
1627 struct be_adapter *adapter = netdev_priv(netdev);
1628 int status;
1629
1630 if (!sriov_enabled(adapter))
1631 return -EPERM;
1632
1633 if (vf >= adapter->num_vfs)
1634 return -EINVAL;
1635
 1636 status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
abccf23e
KA
1637 if (status) {
1638 dev_err(&adapter->pdev->dev,
1639 "Link state change on VF %d failed: %#x\n", vf, status);
1640 return be_cmd_status(status);
1641 }
bdce2ad7 1642
abccf23e
KA
1643 adapter->vf_cfg[vf].plink_tracking = link_state;
1644
1645 return 0;
bdce2ad7 1646}
e1d18735 1647
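/* ndo_set_vf_spoofchk handler: programs MAC spoof-checking for a VF via
 * the hypervisor-switch config command. Not supported on BE2/BE3 (BEx)
 * chips. Typically exercised from userspace with, e.g.:
 *
 *	ip link set <pf-netdev> vf <n> spoofchk {on|off}
 */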
e7bcbd7b
KA
1648static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1649{
1650 struct be_adapter *adapter = netdev_priv(netdev);
1651 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1652 u8 spoofchk;
1653 int status;
1654
1655 if (!sriov_enabled(adapter))
1656 return -EPERM;
1657
1658 if (vf >= adapter->num_vfs)
1659 return -EINVAL;
1660
1661 if (BEx_chip(adapter))
1662 return -EOPNOTSUPP;
1663
1664 if (enable == vf_cfg->spoofchk)
1665 return 0;
1666
1667 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1668
1669 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1670 0, spoofchk);
1671 if (status) {
1672 dev_err(&adapter->pdev->dev,
1673 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1674 return be_cmd_status(status);
1675 }
1676
1677 vf_cfg->spoofchk = enable;
1678 return 0;
1679}
1680
2632bafd
SP
1681static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1682 ulong now)
6b7c5b94 1683{
2632bafd
SP
1684 aic->rx_pkts_prev = rx_pkts;
1685 aic->tx_reqs_prev = tx_pkts;
1686 aic->jiffies = now;
1687}
ac124ff9 1688
2632bafd
SP
1689static void be_eqd_update(struct be_adapter *adapter)
1690{
1691 struct be_set_eqd set_eqd[MAX_EVT_QS];
1692 int eqd, i, num = 0, start;
1693 struct be_aic_obj *aic;
1694 struct be_eq_obj *eqo;
1695 struct be_rx_obj *rxo;
1696 struct be_tx_obj *txo;
1697 u64 rx_pkts, tx_pkts;
1698 ulong now;
1699 u32 pps, delta;
10ef9ab4 1700
2632bafd
SP
1701 for_all_evt_queues(adapter, eqo, i) {
1702 aic = &adapter->aic_obj[eqo->idx];
1703 if (!aic->enable) {
1704 if (aic->jiffies)
1705 aic->jiffies = 0;
1706 eqd = aic->et_eqd;
1707 goto modify_eqd;
1708 }
6b7c5b94 1709
2632bafd
SP
1710 rxo = &adapter->rx_obj[eqo->idx];
1711 do {
57a7744e 1712 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1713 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1714 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1715
2632bafd
SP
1716 txo = &adapter->tx_obj[eqo->idx];
1717 do {
57a7744e 1718 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1719 tx_pkts = txo->stats.tx_reqs;
57a7744e 1720 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1721
2632bafd
SP
 1722 /* Skip if jiffies wrapped around or this is the first calculation */
1723 now = jiffies;
1724 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1725 rx_pkts < aic->rx_pkts_prev ||
1726 tx_pkts < aic->tx_reqs_prev) {
1727 be_aic_update(aic, rx_pkts, tx_pkts, now);
1728 continue;
1729 }
1730
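		/* Derive a new EQ delay from the aggregate rx+tx packet rate
		 * over the last sampling window: roughly 4 delay units per
		 * 15K pkts/sec; results below 8 (i.e. under ~30K pkts/sec)
		 * map to no delay at all, and the final value is clamped to
		 * the configured [min_eqd, max_eqd] range.
		 */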
1731 delta = jiffies_to_msecs(now - aic->jiffies);
1732 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1733 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1734 eqd = (pps / 15000) << 2;
10ef9ab4 1735
2632bafd
SP
1736 if (eqd < 8)
1737 eqd = 0;
1738 eqd = min_t(u32, eqd, aic->max_eqd);
1739 eqd = max_t(u32, eqd, aic->min_eqd);
1740
1741 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1742modify_eqd:
2632bafd
SP
1743 if (eqd != aic->prev_eqd) {
 1744 set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1745 set_eqd[num].eq_id = eqo->q.id;
1746 aic->prev_eqd = eqd;
1747 num++;
1748 }
ac124ff9 1749 }
2632bafd
SP
1750
1751 if (num)
1752 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1753}
1754
3abcdeda 1755static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1756 struct be_rx_compl_info *rxcp)
4097f663 1757{
ac124ff9 1758 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1759
ab1594e9 1760 u64_stats_update_begin(&stats->sync);
3abcdeda 1761 stats->rx_compl++;
2e588f84 1762 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1763 stats->rx_pkts++;
2e588f84 1764 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1765 stats->rx_mcast_pkts++;
2e588f84 1766 if (rxcp->err)
ac124ff9 1767 stats->rx_compl_err++;
ab1594e9 1768 u64_stats_update_end(&stats->sync);
4097f663
SP
1769}
1770
2e588f84 1771static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1772{
19fad86f 1773 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
1774 * Also ignore ipcksm for ipv6 pkts
1775 */
2e588f84 1776 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1777 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1778}
1779
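/* Pop the page_info at the RXQ tail. For the last fragment carved from a
 * page the whole DMA mapping is torn down; for earlier fragments the data
 * is only synced back to the CPU, as the page is still mapped.
 */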
0b0ef1d0 1780static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1781{
10ef9ab4 1782 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1783 struct be_rx_page_info *rx_page_info;
3abcdeda 1784 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1785 u16 frag_idx = rxq->tail;
6b7c5b94 1786
3abcdeda 1787 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1788 BUG_ON(!rx_page_info->page);
1789
e50287be 1790 if (rx_page_info->last_frag) {
2b7bcebf
IV
1791 dma_unmap_page(&adapter->pdev->dev,
1792 dma_unmap_addr(rx_page_info, bus),
1793 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1794 rx_page_info->last_frag = false;
1795 } else {
1796 dma_sync_single_for_cpu(&adapter->pdev->dev,
1797 dma_unmap_addr(rx_page_info, bus),
1798 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1799 }
6b7c5b94 1800
0b0ef1d0 1801 queue_tail_inc(rxq);
6b7c5b94
SP
1802 atomic_dec(&rxq->used);
1803 return rx_page_info;
1804}
1805
 1806/* Throw away the data in the Rx completion */
10ef9ab4
SP
1807static void be_rx_compl_discard(struct be_rx_obj *rxo,
1808 struct be_rx_compl_info *rxcp)
6b7c5b94 1809{
6b7c5b94 1810 struct be_rx_page_info *page_info;
2e588f84 1811 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1812
e80d9da6 1813 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1814 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1815 put_page(page_info->page);
1816 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1817 }
1818}
1819
1820/*
1821 * skb_fill_rx_data forms a complete skb for an ether frame
1822 * indicated by rxcp.
1823 */
10ef9ab4
SP
1824static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1825 struct be_rx_compl_info *rxcp)
6b7c5b94 1826{
6b7c5b94 1827 struct be_rx_page_info *page_info;
2e588f84
SP
1828 u16 i, j;
1829 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1830 u8 *start;
6b7c5b94 1831
0b0ef1d0 1832 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1833 start = page_address(page_info->page) + page_info->page_offset;
1834 prefetch(start);
1835
1836 /* Copy data in the first descriptor of this completion */
2e588f84 1837 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1838
6b7c5b94
SP
1839 skb->len = curr_frag_len;
1840 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1841 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1842 /* Complete packet has now been moved to data */
1843 put_page(page_info->page);
1844 skb->data_len = 0;
1845 skb->tail += curr_frag_len;
1846 } else {
ac1ae5f3
ED
1847 hdr_len = ETH_HLEN;
1848 memcpy(skb->data, start, hdr_len);
6b7c5b94 1849 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1850 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1851 skb_shinfo(skb)->frags[0].page_offset =
1852 page_info->page_offset + hdr_len;
748b539a
SP
1853 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1854 curr_frag_len - hdr_len);
6b7c5b94 1855 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1856 skb->truesize += rx_frag_size;
6b7c5b94
SP
1857 skb->tail += hdr_len;
1858 }
205859a2 1859 page_info->page = NULL;
6b7c5b94 1860
2e588f84
SP
1861 if (rxcp->pkt_size <= rx_frag_size) {
1862 BUG_ON(rxcp->num_rcvd != 1);
1863 return;
6b7c5b94
SP
1864 }
1865
1866 /* More frags present for this completion */
2e588f84
SP
1867 remaining = rxcp->pkt_size - curr_frag_len;
1868 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1869 page_info = get_rx_page_info(rxo);
2e588f84 1870 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1871
bd46cb6c
AK
1872 /* Coalesce all frags from the same physical page in one slot */
1873 if (page_info->page_offset == 0) {
1874 /* Fresh page */
1875 j++;
b061b39e 1876 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1877 skb_shinfo(skb)->frags[j].page_offset =
1878 page_info->page_offset;
9e903e08 1879 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1880 skb_shinfo(skb)->nr_frags++;
1881 } else {
1882 put_page(page_info->page);
1883 }
1884
9e903e08 1885 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1886 skb->len += curr_frag_len;
1887 skb->data_len += curr_frag_len;
bdb28a97 1888 skb->truesize += rx_frag_size;
2e588f84 1889 remaining -= curr_frag_len;
205859a2 1890 page_info->page = NULL;
6b7c5b94 1891 }
bd46cb6c 1892 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1893}
1894
5be93b9a 1895/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1896static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1897 struct be_rx_compl_info *rxcp)
6b7c5b94 1898{
10ef9ab4 1899 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1900 struct net_device *netdev = adapter->netdev;
6b7c5b94 1901 struct sk_buff *skb;
89420424 1902
bb349bb4 1903 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1904 if (unlikely(!skb)) {
ac124ff9 1905 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1906 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1907 return;
1908 }
1909
10ef9ab4 1910 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1911
6332c8d3 1912 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1913 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1914 else
1915 skb_checksum_none_assert(skb);
6b7c5b94 1916
6332c8d3 1917 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1918 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1919 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1920 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1921
b6c0e89d 1922 skb->csum_level = rxcp->tunneled;
6384a4d0 1923 skb_mark_napi_id(skb, napi);
6b7c5b94 1924
343e43c0 1925 if (rxcp->vlanf)
86a9bad3 1926 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1927
1928 netif_receive_skb(skb);
6b7c5b94
SP
1929}
1930
5be93b9a 1931/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1932static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1933 struct napi_struct *napi,
1934 struct be_rx_compl_info *rxcp)
6b7c5b94 1935{
10ef9ab4 1936 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1937 struct be_rx_page_info *page_info;
5be93b9a 1938 struct sk_buff *skb = NULL;
2e588f84
SP
1939 u16 remaining, curr_frag_len;
1940 u16 i, j;
3968fa1e 1941
10ef9ab4 1942 skb = napi_get_frags(napi);
5be93b9a 1943 if (!skb) {
10ef9ab4 1944 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1945 return;
1946 }
1947
2e588f84
SP
1948 remaining = rxcp->pkt_size;
1949 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1950 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1951
1952 curr_frag_len = min(remaining, rx_frag_size);
1953
bd46cb6c
AK
1954 /* Coalesce all frags from the same physical page in one slot */
1955 if (i == 0 || page_info->page_offset == 0) {
1956 /* First frag or Fresh page */
1957 j++;
b061b39e 1958 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1959 skb_shinfo(skb)->frags[j].page_offset =
1960 page_info->page_offset;
9e903e08 1961 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1962 } else {
1963 put_page(page_info->page);
1964 }
9e903e08 1965 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1966 skb->truesize += rx_frag_size;
bd46cb6c 1967 remaining -= curr_frag_len;
6b7c5b94
SP
1968 memset(page_info, 0, sizeof(*page_info));
1969 }
bd46cb6c 1970 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1971
5be93b9a 1972 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1973 skb->len = rxcp->pkt_size;
1974 skb->data_len = rxcp->pkt_size;
5be93b9a 1975 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1976 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1977 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1978 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1979
b6c0e89d 1980 skb->csum_level = rxcp->tunneled;
6384a4d0 1981 skb_mark_napi_id(skb, napi);
5be93b9a 1982
343e43c0 1983 if (rxcp->vlanf)
86a9bad3 1984 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1985
10ef9ab4 1986 napi_gro_frags(napi);
2e588f84
SP
1987}
1988
10ef9ab4
SP
1989static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1990 struct be_rx_compl_info *rxcp)
2e588f84 1991{
c3c18bc1
SP
1992 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1993 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1994 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1995 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1996 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1997 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1998 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1999 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2000 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2001 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2002 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2003 if (rxcp->vlanf) {
c3c18bc1
SP
2004 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2005 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2006 }
c3c18bc1 2007 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2008 rxcp->tunneled =
c3c18bc1 2009 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2010}
2011
10ef9ab4
SP
2012static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2013 struct be_rx_compl_info *rxcp)
2e588f84 2014{
c3c18bc1
SP
2015 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2016 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2017 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2018 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2019 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2020 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2021 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2022 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2023 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2024 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2025 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2026 if (rxcp->vlanf) {
c3c18bc1
SP
2027 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2028 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2029 }
c3c18bc1
SP
2030 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2031 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2032}
2033
2034static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2035{
2036 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2037 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2038 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2039
2e588f84
SP
 2040 /* For checking the valid bit it is OK to use either definition, as the
 2041 * valid bit is at the same position in both v0 and v1 Rx compl */
2042 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2043 return NULL;
6b7c5b94 2044
2e588f84
SP
2045 rmb();
2046 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2047
2e588f84 2048 if (adapter->be3_native)
10ef9ab4 2049 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2050 else
10ef9ab4 2051 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2052
e38b1706
SK
2053 if (rxcp->ip_frag)
2054 rxcp->l4_csum = 0;
2055
15d72184 2056 if (rxcp->vlanf) {
f93f160b
VV
2057 /* In QNQ modes, if qnq bit is not set, then the packet was
2058 * tagged only with the transparent outer vlan-tag and must
 2059 * not be treated as a vlan packet by the host
2060 */
2061 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2062 rxcp->vlanf = 0;
6b7c5b94 2063
15d72184 2064 if (!lancer_chip(adapter))
3c709f8f 2065 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2066
939cf306 2067 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2068 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2069 rxcp->vlanf = 0;
2070 }
2e588f84
SP
2071
 2072 /* As the compl has been parsed, reset it; we won't touch it again */
2073 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2074
3abcdeda 2075 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2076 return rxcp;
2077}
2078
1829b086 2079static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2080{
6b7c5b94 2081 u32 order = get_order(size);
1829b086 2082
6b7c5b94 2083 if (order > 0)
1829b086
ED
2084 gfp |= __GFP_COMP;
2085 return alloc_pages(gfp, order);
6b7c5b94
SP
2086}
2087
2088/*
2089 * Allocate a page, split it to fragments of size rx_frag_size and post as
2090 * receive buffers to BE
2091 */
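/* With the default rx_frag_size of 2048 and 4 KB pages, for example,
 * big_page_size works out to a single page and each page is carved into
 * two 2 KB receive fragments.
 */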
c30d7266 2092static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2093{
3abcdeda 2094 struct be_adapter *adapter = rxo->adapter;
26d92f92 2095 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2096 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2097 struct page *pagep = NULL;
ba42fad0 2098 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2099 struct be_eth_rx_d *rxd;
2100 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2101 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2102
3abcdeda 2103 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2104 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2105 if (!pagep) {
1829b086 2106 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2107 if (unlikely(!pagep)) {
ac124ff9 2108 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2109 break;
2110 }
ba42fad0
IV
2111 page_dmaaddr = dma_map_page(dev, pagep, 0,
2112 adapter->big_page_size,
2b7bcebf 2113 DMA_FROM_DEVICE);
ba42fad0
IV
2114 if (dma_mapping_error(dev, page_dmaaddr)) {
2115 put_page(pagep);
2116 pagep = NULL;
d3de1540 2117 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2118 break;
2119 }
e50287be 2120 page_offset = 0;
6b7c5b94
SP
2121 } else {
2122 get_page(pagep);
e50287be 2123 page_offset += rx_frag_size;
6b7c5b94 2124 }
e50287be 2125 page_info->page_offset = page_offset;
6b7c5b94 2126 page_info->page = pagep;
6b7c5b94
SP
2127
2128 rxd = queue_head_node(rxq);
e50287be 2129 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2130 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2131 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2132
2133 /* Any space left in the current big page for another frag? */
2134 if ((page_offset + rx_frag_size + rx_frag_size) >
2135 adapter->big_page_size) {
2136 pagep = NULL;
e50287be
SP
2137 page_info->last_frag = true;
2138 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2139 } else {
2140 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2141 }
26d92f92
SP
2142
2143 prev_page_info = page_info;
2144 queue_head_inc(rxq);
10ef9ab4 2145 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2146 }
e50287be
SP
2147
2148 /* Mark the last frag of a page when we break out of the above loop
2149 * with no more slots available in the RXQ
2150 */
2151 if (pagep) {
2152 prev_page_info->last_frag = true;
2153 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2154 }
6b7c5b94
SP
2155
2156 if (posted) {
6b7c5b94 2157 atomic_add(posted, &rxq->used);
6384a4d0
SP
2158 if (rxo->rx_post_starved)
2159 rxo->rx_post_starved = false;
c30d7266 2160 do {
69304cc9 2161 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2162 be_rxq_notify(adapter, rxq->id, notify);
2163 posted -= notify;
2164 } while (posted);
ea1dae11
SP
2165 } else if (atomic_read(&rxq->used) == 0) {
2166 /* Let be_worker replenish when memory is available */
3abcdeda 2167 rxo->rx_post_starved = true;
6b7c5b94 2168 }
6b7c5b94
SP
2169}
2170
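/* Fetch the next valid TX completion from the CQ, if any. The valid bit
 * is checked first; the rmb() inside orders that check against the reads
 * of the remaining dwords, and the entry is zeroed after parsing so it is
 * not seen again after a queue wrap-around.
 */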
152ffe5b 2171static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2172{
152ffe5b
SB
2173 struct be_queue_info *tx_cq = &txo->cq;
2174 struct be_tx_compl_info *txcp = &txo->txcp;
2175 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2176
152ffe5b 2177 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2178 return NULL;
2179
152ffe5b 2180 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2181 rmb();
152ffe5b 2182 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2183
152ffe5b
SB
2184 txcp->status = GET_TX_COMPL_BITS(status, compl);
2185 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2186
152ffe5b 2187 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2188 queue_tail_inc(tx_cq);
2189 return txcp;
2190}
2191
3c8def97 2192static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2193 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2194{
5f07b3c5 2195 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2196 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2197 u16 frag_index, num_wrbs = 0;
2198 struct sk_buff *skb = NULL;
2199 bool unmap_skb_hdr = false;
a73b796e 2200 struct be_eth_wrb *wrb;
6b7c5b94 2201
ec43b1a6 2202 do {
5f07b3c5
SP
2203 if (sent_skbs[txq->tail]) {
2204 /* Free skb from prev req */
2205 if (skb)
2206 dev_consume_skb_any(skb);
2207 skb = sent_skbs[txq->tail];
2208 sent_skbs[txq->tail] = NULL;
2209 queue_tail_inc(txq); /* skip hdr wrb */
2210 num_wrbs++;
2211 unmap_skb_hdr = true;
2212 }
a73b796e 2213 wrb = queue_tail_node(txq);
5f07b3c5 2214 frag_index = txq->tail;
2b7bcebf 2215 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2216 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2217 unmap_skb_hdr = false;
6b7c5b94 2218 queue_tail_inc(txq);
5f07b3c5
SP
2219 num_wrbs++;
2220 } while (frag_index != last_index);
2221 dev_consume_skb_any(skb);
6b7c5b94 2222
4d586b82 2223 return num_wrbs;
6b7c5b94
SP
2224}
2225
10ef9ab4
SP
2226/* Return the number of events in the event queue */
2227static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2228{
10ef9ab4
SP
2229 struct be_eq_entry *eqe;
2230 int num = 0;
859b1e4e 2231
10ef9ab4
SP
2232 do {
2233 eqe = queue_tail_node(&eqo->q);
2234 if (eqe->evt == 0)
2235 break;
859b1e4e 2236
10ef9ab4
SP
2237 rmb();
2238 eqe->evt = 0;
2239 num++;
2240 queue_tail_inc(&eqo->q);
2241 } while (true);
2242
2243 return num;
859b1e4e
SP
2244}
2245
10ef9ab4
SP
 2246/* Leaves the EQ in disarmed state */
2247static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2248{
10ef9ab4 2249 int num = events_get(eqo);
859b1e4e 2250
10ef9ab4 2251 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2252}
2253
10ef9ab4 2254static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2255{
2256 struct be_rx_page_info *page_info;
3abcdeda
SP
2257 struct be_queue_info *rxq = &rxo->q;
2258 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2259 struct be_rx_compl_info *rxcp;
d23e946c
SP
2260 struct be_adapter *adapter = rxo->adapter;
2261 int flush_wait = 0;
6b7c5b94 2262
d23e946c
SP
2263 /* Consume pending rx completions.
2264 * Wait for the flush completion (identified by zero num_rcvd)
2265 * to arrive. Notify CQ even when there are no more CQ entries
2266 * for HW to flush partially coalesced CQ entries.
2267 * In Lancer, there is no need to wait for flush compl.
2268 */
2269 for (;;) {
2270 rxcp = be_rx_compl_get(rxo);
ddf1169f 2271 if (!rxcp) {
d23e946c
SP
2272 if (lancer_chip(adapter))
2273 break;
2274
2275 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2276 dev_warn(&adapter->pdev->dev,
2277 "did not receive flush compl\n");
2278 break;
2279 }
2280 be_cq_notify(adapter, rx_cq->id, true, 0);
2281 mdelay(1);
2282 } else {
2283 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2284 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2285 if (rxcp->num_rcvd == 0)
2286 break;
2287 }
6b7c5b94
SP
2288 }
2289
d23e946c
SP
2290 /* After cleanup, leave the CQ in unarmed state */
2291 be_cq_notify(adapter, rx_cq->id, false, 0);
2292
2293 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2294 while (atomic_read(&rxq->used) > 0) {
2295 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2296 put_page(page_info->page);
2297 memset(page_info, 0, sizeof(*page_info));
2298 }
2299 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2300 rxq->tail = 0;
2301 rxq->head = 0;
6b7c5b94
SP
2302}
2303
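/* Drain TX completions on the teardown path: keep reaping completions
 * until every TXQ is empty or the HW has been silent for ~10ms, then free
 * any wrbs that were queued but never notified to the HW and rewind the
 * queue indices to match.
 */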
0ae57bb3 2304static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2305{
5f07b3c5
SP
2306 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2307 struct device *dev = &adapter->pdev->dev;
152ffe5b 2308 struct be_tx_compl_info *txcp;
0ae57bb3 2309 struct be_queue_info *txq;
152ffe5b 2310 struct be_tx_obj *txo;
0ae57bb3 2311 int i, pending_txqs;
a8e9179a 2312
1a3d0717 2313 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2314 do {
0ae57bb3
SP
2315 pending_txqs = adapter->num_tx_qs;
2316
2317 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2318 cmpl = 0;
2319 num_wrbs = 0;
0ae57bb3 2320 txq = &txo->q;
152ffe5b
SB
2321 while ((txcp = be_tx_compl_get(txo))) {
2322 num_wrbs +=
2323 be_tx_compl_process(adapter, txo,
2324 txcp->end_index);
0ae57bb3
SP
2325 cmpl++;
2326 }
2327 if (cmpl) {
2328 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2329 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2330 timeo = 0;
0ae57bb3 2331 }
cf5671e6 2332 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2333 pending_txqs--;
a8e9179a
SP
2334 }
2335
1a3d0717 2336 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2337 break;
2338
2339 mdelay(1);
2340 } while (true);
2341
5f07b3c5 2342 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2343 for_all_tx_queues(adapter, txo, i) {
2344 txq = &txo->q;
0ae57bb3 2345
5f07b3c5
SP
2346 if (atomic_read(&txq->used)) {
2347 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2348 i, atomic_read(&txq->used));
2349 notified_idx = txq->tail;
0ae57bb3 2350 end_idx = txq->tail;
5f07b3c5
SP
2351 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2352 txq->len);
2353 /* Use the tx-compl process logic to handle requests
2354 * that were not sent to the HW.
2355 */
0ae57bb3
SP
2356 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2357 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2358 BUG_ON(atomic_read(&txq->used));
2359 txo->pend_wrb_cnt = 0;
2360 /* Since hw was never notified of these requests,
2361 * reset TXQ indices
2362 */
2363 txq->head = notified_idx;
2364 txq->tail = notified_idx;
0ae57bb3 2365 }
b03388d6 2366 }
6b7c5b94
SP
2367}
2368
10ef9ab4
SP
2369static void be_evt_queues_destroy(struct be_adapter *adapter)
2370{
2371 struct be_eq_obj *eqo;
2372 int i;
2373
2374 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2375 if (eqo->q.created) {
2376 be_eq_clean(eqo);
10ef9ab4 2377 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2378 napi_hash_del(&eqo->napi);
68d7bdcb 2379 netif_napi_del(&eqo->napi);
19d59aa7 2380 }
d658d98a 2381 free_cpumask_var(eqo->affinity_mask);
10ef9ab4
SP
2382 be_queue_free(adapter, &eqo->q);
2383 }
2384}
2385
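/* Create one EQ (with its NAPI context and CPU affinity hint) per event
 * queue slot; the EQ count is the smaller of the available interrupt
 * vectors and the configured queue count.
 */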
2386static int be_evt_queues_create(struct be_adapter *adapter)
2387{
2388 struct be_queue_info *eq;
2389 struct be_eq_obj *eqo;
2632bafd 2390 struct be_aic_obj *aic;
10ef9ab4
SP
2391 int i, rc;
2392
92bf14ab
SP
2393 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2394 adapter->cfg_num_qs);
10ef9ab4
SP
2395
2396 for_all_evt_queues(adapter, eqo, i) {
d658d98a
PR
2397 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2398 return -ENOMEM;
2399 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2400 eqo->affinity_mask);
2401
68d7bdcb
SP
2402 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2403 BE_NAPI_WEIGHT);
6384a4d0 2404 napi_hash_add(&eqo->napi);
2632bafd 2405 aic = &adapter->aic_obj[i];
10ef9ab4 2406 eqo->adapter = adapter;
10ef9ab4 2407 eqo->idx = i;
2632bafd
SP
2408 aic->max_eqd = BE_MAX_EQD;
2409 aic->enable = true;
10ef9ab4
SP
2410
2411 eq = &eqo->q;
2412 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2413 sizeof(struct be_eq_entry));
10ef9ab4
SP
2414 if (rc)
2415 return rc;
2416
f2f781a7 2417 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2418 if (rc)
2419 return rc;
2420 }
1cfafab9 2421 return 0;
10ef9ab4
SP
2422}
2423
5fb379ee
SP
2424static void be_mcc_queues_destroy(struct be_adapter *adapter)
2425{
2426 struct be_queue_info *q;
5fb379ee 2427
8788fdc2 2428 q = &adapter->mcc_obj.q;
5fb379ee 2429 if (q->created)
8788fdc2 2430 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2431 be_queue_free(adapter, q);
2432
8788fdc2 2433 q = &adapter->mcc_obj.cq;
5fb379ee 2434 if (q->created)
8788fdc2 2435 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2436 be_queue_free(adapter, q);
2437}
2438
2439/* Must be called only after TX qs are created as MCC shares TX EQ */
2440static int be_mcc_queues_create(struct be_adapter *adapter)
2441{
2442 struct be_queue_info *q, *cq;
5fb379ee 2443
8788fdc2 2444 cq = &adapter->mcc_obj.cq;
5fb379ee 2445 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2446 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2447 goto err;
2448
10ef9ab4
SP
2449 /* Use the default EQ for MCC completions */
2450 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2451 goto mcc_cq_free;
2452
8788fdc2 2453 q = &adapter->mcc_obj.q;
5fb379ee
SP
2454 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2455 goto mcc_cq_destroy;
2456
8788fdc2 2457 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2458 goto mcc_q_free;
2459
2460 return 0;
2461
2462mcc_q_free:
2463 be_queue_free(adapter, q);
2464mcc_cq_destroy:
8788fdc2 2465 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2466mcc_cq_free:
2467 be_queue_free(adapter, cq);
2468err:
2469 return -1;
2470}
2471
6b7c5b94
SP
2472static void be_tx_queues_destroy(struct be_adapter *adapter)
2473{
2474 struct be_queue_info *q;
3c8def97
SP
2475 struct be_tx_obj *txo;
2476 u8 i;
6b7c5b94 2477
3c8def97
SP
2478 for_all_tx_queues(adapter, txo, i) {
2479 q = &txo->q;
2480 if (q->created)
2481 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2482 be_queue_free(adapter, q);
6b7c5b94 2483
3c8def97
SP
2484 q = &txo->cq;
2485 if (q->created)
2486 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2487 be_queue_free(adapter, q);
2488 }
6b7c5b94
SP
2489}
2490
7707133c 2491static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2492{
73f394e6 2493 struct be_queue_info *cq;
3c8def97 2494 struct be_tx_obj *txo;
73f394e6 2495 struct be_eq_obj *eqo;
92bf14ab 2496 int status, i;
6b7c5b94 2497
92bf14ab 2498 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2499
10ef9ab4
SP
2500 for_all_tx_queues(adapter, txo, i) {
2501 cq = &txo->cq;
2502 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2503 sizeof(struct be_eth_tx_compl));
2504 if (status)
2505 return status;
3c8def97 2506
827da44c
JS
2507 u64_stats_init(&txo->stats.sync);
2508 u64_stats_init(&txo->stats.sync_compl);
2509
10ef9ab4
SP
2510 /* If num_evt_qs is less than num_tx_qs, then more than
 2511 * one txq will share an eq
2512 */
73f394e6
SP
2513 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2514 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2515 if (status)
2516 return status;
6b7c5b94 2517
10ef9ab4
SP
2518 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2519 sizeof(struct be_eth_wrb));
2520 if (status)
2521 return status;
6b7c5b94 2522
94d73aaa 2523 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2524 if (status)
2525 return status;
73f394e6
SP
2526
2527 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2528 eqo->idx);
3c8def97 2529 }
6b7c5b94 2530
d379142b
SP
2531 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2532 adapter->num_tx_qs);
10ef9ab4 2533 return 0;
6b7c5b94
SP
2534}
2535
10ef9ab4 2536static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2537{
2538 struct be_queue_info *q;
3abcdeda
SP
2539 struct be_rx_obj *rxo;
2540 int i;
2541
2542 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2543 q = &rxo->cq;
2544 if (q->created)
2545 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2546 be_queue_free(adapter, q);
ac6a0c4a
SP
2547 }
2548}
2549
10ef9ab4 2550static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2551{
10ef9ab4 2552 struct be_queue_info *eq, *cq;
3abcdeda
SP
2553 struct be_rx_obj *rxo;
2554 int rc, i;
6b7c5b94 2555
92bf14ab 2556 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2557 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2558
71bb8bd0
VV
 2559 /* We'll use RSS only if at least 2 RSS rings are supported. */
2560 if (adapter->num_rss_qs <= 1)
2561 adapter->num_rss_qs = 0;
2562
2563 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2564
2565 /* When the interface is not capable of RSS rings (and there is no
2566 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2567 */
71bb8bd0
VV
2568 if (adapter->num_rx_qs == 0)
2569 adapter->num_rx_qs = 1;
92bf14ab 2570
6b7c5b94 2571 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2572 for_all_rx_queues(adapter, rxo, i) {
2573 rxo->adapter = adapter;
3abcdeda
SP
2574 cq = &rxo->cq;
2575 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2576 sizeof(struct be_eth_rx_compl));
3abcdeda 2577 if (rc)
10ef9ab4 2578 return rc;
3abcdeda 2579
827da44c 2580 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2581 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2582 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2583 if (rc)
10ef9ab4 2584 return rc;
3abcdeda 2585 }
6b7c5b94 2586
d379142b 2587 dev_info(&adapter->pdev->dev,
71bb8bd0 2588 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2589 return 0;
b628bde2
SP
2590}
2591
6b7c5b94
SP
2592static irqreturn_t be_intx(int irq, void *dev)
2593{
e49cc34f
SP
2594 struct be_eq_obj *eqo = dev;
2595 struct be_adapter *adapter = eqo->adapter;
2596 int num_evts = 0;
6b7c5b94 2597
d0b9cec3
SP
2598 /* IRQ is not expected when NAPI is scheduled as the EQ
2599 * will not be armed.
2600 * But, this can happen on Lancer INTx where it takes
 2601 * a while to de-assert INTx or in BE2 where occasionally
2602 * an interrupt may be raised even when EQ is unarmed.
2603 * If NAPI is already scheduled, then counting & notifying
2604 * events will orphan them.
e49cc34f 2605 */
d0b9cec3 2606 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2607 num_evts = events_get(eqo);
d0b9cec3
SP
2608 __napi_schedule(&eqo->napi);
2609 if (num_evts)
2610 eqo->spurious_intr = 0;
2611 }
2612 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2613
d0b9cec3
SP
 2614 /* Return IRQ_HANDLED only for the first spurious intr
2615 * after a valid intr to stop the kernel from branding
2616 * this irq as a bad one!
e49cc34f 2617 */
d0b9cec3
SP
2618 if (num_evts || eqo->spurious_intr++ == 0)
2619 return IRQ_HANDLED;
2620 else
2621 return IRQ_NONE;
6b7c5b94
SP
2622}
2623
10ef9ab4 2624static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2625{
10ef9ab4 2626 struct be_eq_obj *eqo = dev;
6b7c5b94 2627
0b545a62
SP
2628 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2629 napi_schedule(&eqo->napi);
6b7c5b94
SP
2630 return IRQ_HANDLED;
2631}
2632
2e588f84 2633static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2634{
e38b1706 2635 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2636}
2637
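/* RX poll loop: consume up to 'budget' completions, discarding flush
 * completions, partial-DMA completions (Lancer B0) and, on BE, packets
 * that leak through imperfect promiscuous-mode filtering; everything else
 * is handed to the stack via GRO or the regular receive path.
 */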
10ef9ab4 2638static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2639 int budget, int polling)
6b7c5b94 2640{
3abcdeda
SP
2641 struct be_adapter *adapter = rxo->adapter;
2642 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2643 struct be_rx_compl_info *rxcp;
6b7c5b94 2644 u32 work_done;
c30d7266 2645 u32 frags_consumed = 0;
6b7c5b94
SP
2646
2647 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2648 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2649 if (!rxcp)
2650 break;
2651
12004ae9
SP
2652 /* Is it a flush compl that has no data */
2653 if (unlikely(rxcp->num_rcvd == 0))
2654 goto loop_continue;
2655
 2656 /* Discard compl with partial DMA (Lancer B0) */
2657 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2658 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2659 goto loop_continue;
2660 }
2661
2662 /* On BE drop pkts that arrive due to imperfect filtering in
 2663 * promiscuous mode on some SKUs
2664 */
2665 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2666 !lancer_chip(adapter))) {
10ef9ab4 2667 be_rx_compl_discard(rxo, rxcp);
12004ae9 2668 goto loop_continue;
64642811 2669 }
009dd872 2670
6384a4d0
SP
2671 /* Don't do gro when we're busy_polling */
2672 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2673 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2674 else
6384a4d0
SP
2675 be_rx_compl_process(rxo, napi, rxcp);
2676
12004ae9 2677loop_continue:
c30d7266 2678 frags_consumed += rxcp->num_rcvd;
2e588f84 2679 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2680 }
2681
10ef9ab4
SP
2682 if (work_done) {
2683 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2684
6384a4d0
SP
2685 /* When an rx-obj gets into post_starved state, just
2686 * let be_worker do the posting.
2687 */
2688 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2689 !rxo->rx_post_starved)
c30d7266
AK
2690 be_post_rx_frags(rxo, GFP_ATOMIC,
2691 max_t(u32, MAX_RX_POST,
2692 frags_consumed));
6b7c5b94 2693 }
10ef9ab4 2694
6b7c5b94
SP
2695 return work_done;
2696}
2697
152ffe5b 2698static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2699{
2700 switch (status) {
2701 case BE_TX_COMP_HDR_PARSE_ERR:
2702 tx_stats(txo)->tx_hdr_parse_err++;
2703 break;
2704 case BE_TX_COMP_NDMA_ERR:
2705 tx_stats(txo)->tx_dma_err++;
2706 break;
2707 case BE_TX_COMP_ACL_ERR:
2708 tx_stats(txo)->tx_spoof_check_err++;
2709 break;
2710 }
2711}
2712
152ffe5b 2713static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2714{
2715 switch (status) {
2716 case LANCER_TX_COMP_LSO_ERR:
2717 tx_stats(txo)->tx_tso_err++;
2718 break;
2719 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2720 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2721 tx_stats(txo)->tx_spoof_check_err++;
2722 break;
2723 case LANCER_TX_COMP_QINQ_ERR:
2724 tx_stats(txo)->tx_qinq_err++;
2725 break;
2726 case LANCER_TX_COMP_PARITY_ERR:
2727 tx_stats(txo)->tx_internal_parity_err++;
2728 break;
2729 case LANCER_TX_COMP_DMA_ERR:
2730 tx_stats(txo)->tx_dma_err++;
2731 break;
2732 }
2733}
2734
c8f64615
SP
2735static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2736 int idx)
6b7c5b94 2737{
c8f64615 2738 int num_wrbs = 0, work_done = 0;
152ffe5b 2739 struct be_tx_compl_info *txcp;
c8f64615 2740
152ffe5b
SB
2741 while ((txcp = be_tx_compl_get(txo))) {
2742 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2743 work_done++;
3c8def97 2744
152ffe5b 2745 if (txcp->status) {
512bb8a2 2746 if (lancer_chip(adapter))
152ffe5b 2747 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2748 else
152ffe5b 2749 be_update_tx_err(txo, txcp->status);
512bb8a2 2750 }
10ef9ab4 2751 }
6b7c5b94 2752
10ef9ab4
SP
2753 if (work_done) {
2754 be_cq_notify(adapter, txo->cq.id, true, work_done);
2755 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2756
10ef9ab4
SP
2757 /* As Tx wrbs have been freed up, wake up netdev queue
2758 * if it was stopped due to lack of tx wrbs. */
2759 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2760 be_can_txq_wake(txo)) {
10ef9ab4 2761 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2762 }
10ef9ab4
SP
2763
2764 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2765 tx_stats(txo)->tx_compl += work_done;
2766 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2767 }
10ef9ab4 2768}
6b7c5b94 2769
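/* NAPI vs busy-poll mutual exclusion: eqo->state acts as a small lock
 * word. Whichever context (softirq NAPI or a busy-polling socket) claims
 * it first processes the rings; the other backs off and records a yield,
 * and the WARN_ONs flag any inconsistent state transition.
 */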
f7062ee5
SP
2770#ifdef CONFIG_NET_RX_BUSY_POLL
2771static inline bool be_lock_napi(struct be_eq_obj *eqo)
2772{
2773 bool status = true;
2774
2775 spin_lock(&eqo->lock); /* BH is already disabled */
2776 if (eqo->state & BE_EQ_LOCKED) {
2777 WARN_ON(eqo->state & BE_EQ_NAPI);
2778 eqo->state |= BE_EQ_NAPI_YIELD;
2779 status = false;
2780 } else {
2781 eqo->state = BE_EQ_NAPI;
2782 }
2783 spin_unlock(&eqo->lock);
2784 return status;
2785}
2786
2787static inline void be_unlock_napi(struct be_eq_obj *eqo)
2788{
2789 spin_lock(&eqo->lock); /* BH is already disabled */
2790
2791 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2792 eqo->state = BE_EQ_IDLE;
2793
2794 spin_unlock(&eqo->lock);
2795}
2796
2797static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2798{
2799 bool status = true;
2800
2801 spin_lock_bh(&eqo->lock);
2802 if (eqo->state & BE_EQ_LOCKED) {
2803 eqo->state |= BE_EQ_POLL_YIELD;
2804 status = false;
2805 } else {
2806 eqo->state |= BE_EQ_POLL;
2807 }
2808 spin_unlock_bh(&eqo->lock);
2809 return status;
2810}
2811
2812static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2813{
2814 spin_lock_bh(&eqo->lock);
2815
2816 WARN_ON(eqo->state & (BE_EQ_NAPI));
2817 eqo->state = BE_EQ_IDLE;
2818
2819 spin_unlock_bh(&eqo->lock);
2820}
2821
2822static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2823{
2824 spin_lock_init(&eqo->lock);
2825 eqo->state = BE_EQ_IDLE;
2826}
2827
2828static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2829{
2830 local_bh_disable();
2831
2832 /* It's enough to just acquire napi lock on the eqo to stop
 2833 * be_busy_poll() from processing any queues.
2834 */
2835 while (!be_lock_napi(eqo))
2836 mdelay(1);
2837
2838 local_bh_enable();
2839}
2840
2841#else /* CONFIG_NET_RX_BUSY_POLL */
2842
2843static inline bool be_lock_napi(struct be_eq_obj *eqo)
2844{
2845 return true;
2846}
2847
2848static inline void be_unlock_napi(struct be_eq_obj *eqo)
2849{
2850}
2851
2852static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2853{
2854 return false;
2855}
2856
2857static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2858{
2859}
2860
2861static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2862{
2863}
2864
2865static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2866{
2867}
2868#endif /* CONFIG_NET_RX_BUSY_POLL */
2869
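/* Main NAPI handler: count pending EQ entries, reap TX completions for
 * every TXQ on this EQ, then process RX (unless a busy-poller owns the
 * rings, in which case the full budget is returned so NAPI polls again).
 * MCC completions are also handled here for the EQ that owns the MCC CQ.
 * The EQ is re-armed only when less than the full budget was consumed.
 */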
68d7bdcb 2870int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2871{
2872 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2873 struct be_adapter *adapter = eqo->adapter;
0b545a62 2874 int max_work = 0, work, i, num_evts;
6384a4d0 2875 struct be_rx_obj *rxo;
a4906ea0 2876 struct be_tx_obj *txo;
f31e50a8 2877
0b545a62
SP
2878 num_evts = events_get(eqo);
2879
a4906ea0
SP
2880 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2881 be_process_tx(adapter, txo, i);
f31e50a8 2882
6384a4d0
SP
2883 if (be_lock_napi(eqo)) {
2884 /* This loop will iterate twice for EQ0 in which
 2885 * completions of the last RXQ (default one) are also processed.
 2886 * For other EQs the loop iterates only once.
2887 */
2888 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2889 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2890 max_work = max(work, max_work);
2891 }
2892 be_unlock_napi(eqo);
2893 } else {
2894 max_work = budget;
10ef9ab4 2895 }
6b7c5b94 2896
10ef9ab4
SP
2897 if (is_mcc_eqo(eqo))
2898 be_process_mcc(adapter);
93c86700 2899
10ef9ab4
SP
2900 if (max_work < budget) {
2901 napi_complete(napi);
0b545a62 2902 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2903 } else {
2904 /* As we'll continue in polling mode, count and clear events */
0b545a62 2905 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2906 }
10ef9ab4 2907 return max_work;
6b7c5b94
SP
2908}
2909
6384a4d0
SP
2910#ifdef CONFIG_NET_RX_BUSY_POLL
2911static int be_busy_poll(struct napi_struct *napi)
2912{
2913 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2914 struct be_adapter *adapter = eqo->adapter;
2915 struct be_rx_obj *rxo;
2916 int i, work = 0;
2917
2918 if (!be_lock_busy_poll(eqo))
2919 return LL_FLUSH_BUSY;
2920
2921 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2922 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2923 if (work)
2924 break;
2925 }
2926
2927 be_unlock_busy_poll(eqo);
2928 return work;
2929}
2930#endif
2931
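/* Check the adapter for unrecoverable errors: on Lancer via the SLIPORT
 * status/error registers, on other chips via the two UE status CSRs
 * (masked by their per-bit mask registers and decoded with the
 * ue_status_low_desc/ue_status_hi_desc tables above). The carrier is
 * turned off whenever an error is detected.
 */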
f67ef7ba 2932void be_detect_error(struct be_adapter *adapter)
7c185276 2933{
e1cfb67a
PR
2934 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2935 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2936 u32 i;
eb0eecc1
SK
2937 bool error_detected = false;
2938 struct device *dev = &adapter->pdev->dev;
2939 struct net_device *netdev = adapter->netdev;
7c185276 2940
d23e946c 2941 if (be_hw_error(adapter))
72f02485
SP
2942 return;
2943
e1cfb67a
PR
2944 if (lancer_chip(adapter)) {
2945 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2946 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2947 sliport_err1 = ioread32(adapter->db +
748b539a 2948 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2949 sliport_err2 = ioread32(adapter->db +
748b539a 2950 SLIPORT_ERROR2_OFFSET);
eb0eecc1 2951 adapter->hw_error = true;
d0e1b319 2952 error_detected = true;
eb0eecc1
SK
 2953 /* Do not log error messages if it's a FW reset */
2954 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2955 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2956 dev_info(dev, "Firmware update in progress\n");
2957 } else {
eb0eecc1
SK
2958 dev_err(dev, "Error detected in the card\n");
2959 dev_err(dev, "ERR: sliport status 0x%x\n",
2960 sliport_status);
2961 dev_err(dev, "ERR: sliport error1 0x%x\n",
2962 sliport_err1);
2963 dev_err(dev, "ERR: sliport error2 0x%x\n",
2964 sliport_err2);
2965 }
e1cfb67a
PR
2966 }
2967 } else {
25848c90
SR
2968 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2969 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2970 ue_lo_mask = ioread32(adapter->pcicfg +
2971 PCICFG_UE_STATUS_LOW_MASK);
2972 ue_hi_mask = ioread32(adapter->pcicfg +
2973 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 2974
f67ef7ba
PR
2975 ue_lo = (ue_lo & ~ue_lo_mask);
2976 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2977
eb0eecc1
SK
2978 /* On certain platforms BE hardware can indicate spurious UEs.
2979 * Allow HW to stop working completely in case of a real UE.
 2980 * Hence hw_error is not set on UE detection.
2981 */
f67ef7ba 2982
eb0eecc1
SK
2983 if (ue_lo || ue_hi) {
2984 error_detected = true;
2985 dev_err(dev,
2986 "Unrecoverable Error detected in the adapter");
2987 dev_err(dev, "Please reboot server to recover");
2988 if (skyhawk_chip(adapter))
2989 adapter->hw_error = true;
2990 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2991 if (ue_lo & 1)
2992 dev_err(dev, "UE: %s bit set\n",
2993 ue_status_low_desc[i]);
2994 }
2995 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2996 if (ue_hi & 1)
2997 dev_err(dev, "UE: %s bit set\n",
2998 ue_status_hi_desc[i]);
2999 }
7c185276
AK
3000 }
3001 }
eb0eecc1
SK
3002 if (error_detected)
3003 netif_carrier_off(netdev);
7c185276
AK
3004}
3005
8d56ff11
SP
3006static void be_msix_disable(struct be_adapter *adapter)
3007{
ac6a0c4a 3008 if (msix_enabled(adapter)) {
8d56ff11 3009 pci_disable_msix(adapter->pdev);
ac6a0c4a 3010 adapter->num_msix_vec = 0;
68d7bdcb 3011 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3012 }
3013}
3014
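/* Enable MSI-x. When RoCE is supported, budget up to twice the max EQ
 * count (capped at 2 * num_online_cpus()) so half the vectors can be
 * handed to the RoCE driver; otherwise request one vector per configured
 * queue. A PF falls back to INTx if enabling fails; a VF fails probe.
 */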
c2bba3df 3015static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3016{
7dc4c064 3017 int i, num_vec;
d379142b 3018 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3019
92bf14ab
SP
3020 /* If RoCE is supported, program the max number of NIC vectors that
3021 * may be configured via set-channels, along with vectors needed for
 3022 * RoCE. Else, just program the number we'll use initially.
3023 */
3024 if (be_roce_supported(adapter))
3025 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3026 2 * num_online_cpus());
3027 else
3028 num_vec = adapter->cfg_num_qs;
3abcdeda 3029
ac6a0c4a 3030 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3031 adapter->msix_entries[i].entry = i;
3032
7dc4c064
AG
3033 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3034 MIN_MSIX_VECTORS, num_vec);
3035 if (num_vec < 0)
3036 goto fail;
92bf14ab 3037
92bf14ab
SP
3038 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3039 adapter->num_msix_roce_vec = num_vec / 2;
3040 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3041 adapter->num_msix_roce_vec);
3042 }
3043
3044 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3045
3046 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3047 adapter->num_msix_vec);
c2bba3df 3048 return 0;
7dc4c064
AG
3049
3050fail:
3051 dev_warn(dev, "MSIx enable failed\n");
3052
3053 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3054 if (!be_physfn(adapter))
3055 return num_vec;
3056 return 0;
6b7c5b94
SP
3057}
3058
fe6d2a38 3059static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3060 struct be_eq_obj *eqo)
b628bde2 3061{
f2f781a7 3062 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3063}
6b7c5b94 3064
b628bde2
SP
3065static int be_msix_register(struct be_adapter *adapter)
3066{
10ef9ab4
SP
3067 struct net_device *netdev = adapter->netdev;
3068 struct be_eq_obj *eqo;
3069 int status, i, vec;
6b7c5b94 3070
10ef9ab4
SP
3071 for_all_evt_queues(adapter, eqo, i) {
3072 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3073 vec = be_msix_vec_get(adapter, eqo);
3074 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3075 if (status)
3076 goto err_msix;
d658d98a
PR
3077
3078 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3079 }
b628bde2 3080
6b7c5b94 3081 return 0;
3abcdeda 3082err_msix:
10ef9ab4
SP
3083 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3084 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3085 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3086 status);
ac6a0c4a 3087 be_msix_disable(adapter);
6b7c5b94
SP
3088 return status;
3089}
3090
3091static int be_irq_register(struct be_adapter *adapter)
3092{
3093 struct net_device *netdev = adapter->netdev;
3094 int status;
3095
ac6a0c4a 3096 if (msix_enabled(adapter)) {
6b7c5b94
SP
3097 status = be_msix_register(adapter);
3098 if (status == 0)
3099 goto done;
ba343c77
SB
3100 /* INTx is not supported for VF */
3101 if (!be_physfn(adapter))
3102 return status;
6b7c5b94
SP
3103 }
3104
e49cc34f 3105 /* INTx: only the first EQ is used */
6b7c5b94
SP
3106 netdev->irq = adapter->pdev->irq;
3107 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3108 &adapter->eq_obj[0]);
6b7c5b94
SP
3109 if (status) {
3110 dev_err(&adapter->pdev->dev,
3111 "INTx request IRQ failed - err %d\n", status);
3112 return status;
3113 }
3114done:
3115 adapter->isr_registered = true;
3116 return 0;
3117}
3118
3119static void be_irq_unregister(struct be_adapter *adapter)
3120{
3121 struct net_device *netdev = adapter->netdev;
10ef9ab4 3122 struct be_eq_obj *eqo;
d658d98a 3123 int i, vec;
6b7c5b94
SP
3124
3125 if (!adapter->isr_registered)
3126 return;
3127
3128 /* INTx */
ac6a0c4a 3129 if (!msix_enabled(adapter)) {
e49cc34f 3130 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3131 goto done;
3132 }
3133
3134 /* MSIx */
d658d98a
PR
3135 for_all_evt_queues(adapter, eqo, i) {
3136 vec = be_msix_vec_get(adapter, eqo);
3137 irq_set_affinity_hint(vec, NULL);
3138 free_irq(vec, eqo);
3139 }
3abcdeda 3140
6b7c5b94
SP
3141done:
3142 adapter->isr_registered = false;
6b7c5b94
SP
3143}
3144
10ef9ab4 3145static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3146{
3147 struct be_queue_info *q;
3148 struct be_rx_obj *rxo;
3149 int i;
3150
3151 for_all_rx_queues(adapter, rxo, i) {
3152 q = &rxo->q;
3153 if (q->created) {
3154 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3155 be_rx_cq_clean(rxo);
482c9e79 3156 }
10ef9ab4 3157 be_queue_free(adapter, q);
482c9e79
SP
3158 }
3159}
3160
889cd4b2
SP
3161static int be_close(struct net_device *netdev)
3162{
3163 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3164 struct be_eq_obj *eqo;
3165 int i;
889cd4b2 3166
e1ad8e33
KA
3167 /* This protection is needed as be_close() may be called even when the
 3168 * adapter is in a cleared state (after an EEH permanent failure)
3169 */
3170 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3171 return 0;
3172
045508a8
PP
3173 be_roce_dev_close(adapter);
3174
dff345c5
IV
3175 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3176 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3177 napi_disable(&eqo->napi);
6384a4d0
SP
3178 be_disable_busy_poll(eqo);
3179 }
71237b6f 3180 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3181 }
a323d9bf
SP
3182
3183 be_async_mcc_disable(adapter);
3184
3185 /* Wait for all pending tx completions to arrive so that
3186 * all tx skbs are freed.
3187 */
fba87559 3188 netif_tx_disable(netdev);
6e1f9975 3189 be_tx_compl_clean(adapter);
a323d9bf
SP
3190
3191 be_rx_qs_destroy(adapter);
f66b7cfd 3192 be_clear_uc_list(adapter);
d11a347d 3193
a323d9bf 3194 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3195 if (msix_enabled(adapter))
3196 synchronize_irq(be_msix_vec_get(adapter, eqo));
3197 else
3198 synchronize_irq(netdev->irq);
3199 be_eq_clean(eqo);
63fcb27f
PR
3200 }
3201
889cd4b2
SP
3202 be_irq_unregister(adapter);
3203
482c9e79
SP
3204 return 0;
3205}
3206
10ef9ab4 3207static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3208{
1dcf7b1c
ED
3209 struct rss_info *rss = &adapter->rss_info;
3210 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3211 struct be_rx_obj *rxo;
e9008ee9 3212 int rc, i, j;
482c9e79
SP
3213
3214 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3215 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3216 sizeof(struct be_eth_rx_d));
3217 if (rc)
3218 return rc;
3219 }
3220
71bb8bd0
VV
3221 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3222 rxo = default_rxo(adapter);
3223 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3224 rx_frag_size, adapter->if_handle,
3225 false, &rxo->rss_id);
3226 if (rc)
3227 return rc;
3228 }
10ef9ab4
SP
3229
3230 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3231 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3232 rx_frag_size, adapter->if_handle,
3233 true, &rxo->rss_id);
482c9e79
SP
3234 if (rc)
3235 return rc;
3236 }
3237
3238 if (be_multi_rxq(adapter)) {
71bb8bd0 3239 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3240 for_all_rss_queues(adapter, rxo, i) {
e2557877 3241 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3242 break;
e2557877
VD
3243 rss->rsstable[j + i] = rxo->rss_id;
3244 rss->rss_queue[j + i] = i;
e9008ee9
PR
3245 }
3246 }
e2557877
VD
3247 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3248 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3249
3250 if (!BEx_chip(adapter))
e2557877
VD
3251 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3252 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3253 } else {
3254 /* Disable RSS, if only default RX Q is created */
e2557877 3255 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3256 }
594ad54a 3257
1dcf7b1c 3258 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3259 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3260 128, rss_key);
da1388d6 3261 if (rc) {
e2557877 3262 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3263 return rc;
482c9e79
SP
3264 }
3265
1dcf7b1c 3266 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3267
482c9e79 3268 /* First time posting */
10ef9ab4 3269 for_all_rx_queues(adapter, rxo, i)
c30d7266 3270 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3271 return 0;
3272}
3273
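/* Bring-up order matters here: RXQs are created and IRQs registered
 * first, then the RX/TX CQs are armed and MCC async events enabled,
 * NAPI is switched on and the EQs armed, and only then is the link
 * status queried and the TX queues started.
 */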
6b7c5b94
SP
3274static int be_open(struct net_device *netdev)
3275{
3276 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3277 struct be_eq_obj *eqo;
3abcdeda 3278 struct be_rx_obj *rxo;
10ef9ab4 3279 struct be_tx_obj *txo;
b236916a 3280 u8 link_status;
3abcdeda 3281 int status, i;
5fb379ee 3282
10ef9ab4 3283 status = be_rx_qs_create(adapter);
482c9e79
SP
3284 if (status)
3285 goto err;
3286
c2bba3df
SK
3287 status = be_irq_register(adapter);
3288 if (status)
3289 goto err;
5fb379ee 3290
10ef9ab4 3291 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3292 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3293
10ef9ab4
SP
3294 for_all_tx_queues(adapter, txo, i)
3295 be_cq_notify(adapter, txo->cq.id, true, 0);
3296
7a1e9b20
SP
3297 be_async_mcc_enable(adapter);
3298
10ef9ab4
SP
3299 for_all_evt_queues(adapter, eqo, i) {
3300 napi_enable(&eqo->napi);
6384a4d0 3301 be_enable_busy_poll(eqo);
4cad9f3b 3302 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3303 }
04d3d624 3304 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3305
323ff71e 3306 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3307 if (!status)
3308 be_link_status_update(adapter, link_status);
3309
fba87559 3310 netif_tx_start_all_queues(netdev);
045508a8 3311 be_roce_dev_open(adapter);
c9c47142 3312
c5abe7c0 3313#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3314 if (skyhawk_chip(adapter))
3315 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3316#endif
3317
889cd4b2
SP
3318 return 0;
3319err:
3320 be_close(adapter->netdev);
3321 return -EIO;
5fb379ee
SP
3322}
3323
71d8d1b5
AK
3324static int be_setup_wol(struct be_adapter *adapter, bool enable)
3325{
3326 struct be_dma_mem cmd;
3327 int status = 0;
3328 u8 mac[ETH_ALEN];
3329
c7bf7169 3330 eth_zero_addr(mac);
71d8d1b5
AK
3331
3332 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3333 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3334 GFP_KERNEL);
ddf1169f 3335 if (!cmd.va)
6b568689 3336 return -ENOMEM;
71d8d1b5
AK
3337
3338 if (enable) {
3339 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3340 PCICFG_PM_CONTROL_OFFSET,
3341 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3342 if (status) {
3343 dev_err(&adapter->pdev->dev,
2381a55c 3344 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3345 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3346 cmd.dma);
71d8d1b5
AK
3347 return status;
3348 }
3349 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3350 adapter->netdev->dev_addr,
3351 &cmd);
71d8d1b5
AK
3352 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3353 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3354 } else {
3355 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3356 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3357 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3358 }
3359
2b7bcebf 3360 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3361 return status;
3362}
3363
f7062ee5
SP
3364static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3365{
3366 u32 addr;
3367
3368 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3369
3370 mac[5] = (u8)(addr & 0xFF);
3371 mac[4] = (u8)((addr >> 8) & 0xFF);
3372 mac[3] = (u8)((addr >> 16) & 0xFF);
3373 /* Use the OUI from the current MAC address */
3374 memcpy(mac, adapter->netdev->dev_addr, 3);
3375}
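/* Editorial example (hash value hypothetical): if the PF MAC is
 * 00:90:fa:aa:bb:cc and jhash() returns 0x12345678, the generated seed
 * is 00:90:fa:34:56:78 -- the OUI is preserved and the low 24 bits of
 * the hash land in mac[3..5]. be_vf_eth_addr_config() below then gives
 * this seed to the first VF and bumps mac[5] for each subsequent one.
 */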
3376
6d87f5c3
AK
3377/*
3378 * Generate a seed MAC address from the PF MAC Address using jhash.
3379 * MAC addresses for VFs are assigned incrementally starting from the seed.
3380 * These addresses are programmed in the ASIC by the PF and the VF driver
3381 * queries for the MAC address during its probe.
3382 */
4c876616 3383static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3384{
f9449ab7 3385 u32 vf;
3abcdeda 3386 int status = 0;
6d87f5c3 3387 u8 mac[ETH_ALEN];
11ac75ed 3388 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3389
3390 be_vf_eth_addr_generate(adapter, mac);
3391
11ac75ed 3392 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3393 if (BEx_chip(adapter))
590c391d 3394 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3395 vf_cfg->if_handle,
3396 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3397 else
3398 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3399 vf + 1);
590c391d 3400
6d87f5c3
AK
3401 if (status)
3402 dev_err(&adapter->pdev->dev,
748b539a
SP
3403 "Mac address assignment failed for VF %d\n",
3404 vf);
6d87f5c3 3405 else
11ac75ed 3406 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3407
3408 mac[5] += 1;
3409 }
3410 return status;
3411}
3412
4c876616
SP
3413static int be_vfs_mac_query(struct be_adapter *adapter)
3414{
3415 int status, vf;
3416 u8 mac[ETH_ALEN];
3417 struct be_vf_cfg *vf_cfg;
4c876616
SP
3418
3419 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3420 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3421 mac, vf_cfg->if_handle,
3422 false, vf+1);
4c876616
SP
3423 if (status)
3424 return status;
3425 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3426 }
3427 return 0;
3428}
3429
f9449ab7 3430static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3431{
11ac75ed 3432 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3433 u32 vf;
3434
257a3feb 3435 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3436 dev_warn(&adapter->pdev->dev,
3437 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3438 goto done;
3439 }
3440
b4c1df93
SP
3441 pci_disable_sriov(adapter->pdev);
3442
11ac75ed 3443 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3444 if (BEx_chip(adapter))
11ac75ed
SP
3445 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3446 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3447 else
3448 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3449 vf + 1);
f9449ab7 3450
11ac75ed
SP
3451 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3452 }
39f1d94d
SP
3453done:
3454 kfree(adapter->vf_cfg);
3455 adapter->num_vfs = 0;
f174c7ec 3456 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3457}
3458
7707133c
SP
3459static void be_clear_queues(struct be_adapter *adapter)
3460{
3461 be_mcc_queues_destroy(adapter);
3462 be_rx_cqs_destroy(adapter);
3463 be_tx_queues_destroy(adapter);
3464 be_evt_queues_destroy(adapter);
3465}
3466
68d7bdcb 3467static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3468{
191eb756
SP
3469 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3470 cancel_delayed_work_sync(&adapter->work);
3471 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3472 }
68d7bdcb
SP
3473}
3474
eb7dd46c
SP
3475static void be_cancel_err_detection(struct be_adapter *adapter)
3476{
3477 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3478 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3479 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3480 }
3481}
3482
b05004ad 3483static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3484{
b05004ad 3485 if (adapter->pmac_id) {
f66b7cfd
SP
3486 be_cmd_pmac_del(adapter, adapter->if_handle,
3487 adapter->pmac_id[0], 0);
b05004ad
SK
3488 kfree(adapter->pmac_id);
3489 adapter->pmac_id = NULL;
3490 }
3491}
3492
c5abe7c0 3493#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3494static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3495{
630f4b70
SB
3496 struct net_device *netdev = adapter->netdev;
3497
c9c47142
SP
3498 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3499 be_cmd_manage_iface(adapter, adapter->if_handle,
3500 OP_CONVERT_TUNNEL_TO_NORMAL);
3501
3502 if (adapter->vxlan_port)
3503 be_cmd_set_vxlan_port(adapter, 0);
3504
3505 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3506 adapter->vxlan_port = 0;
630f4b70
SB
3507
3508 netdev->hw_enc_features = 0;
3509 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3510 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3511}
c5abe7c0 3512#endif
c9c47142 3513
f2858738
VV
3514static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3515{
3516 struct be_resources res = adapter->pool_res;
3517 u16 num_vf_qs = 1;
3518
3519 /* Distribute the queue resources equally among the PF and its VFs.
3520 * Do not distribute queue resources in multi-channel configuration.
3521 */
3522 if (num_vfs && !be_is_mc(adapter)) {
3523 /* If the number of VFs requested is at least 8 less than the max
3524 * supported, assign 8 queue pairs to the PF and divide the
3525 * remaining resources evenly among the VFs
3526 */
3527 if (num_vfs < (be_max_vfs(adapter) - 8))
3528 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3529 else
3530 num_vf_qs = res.max_rss_qs / num_vfs;
3531
3532 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3533 * interfaces per port. Provide RSS on VFs only if the number
3534 * of VFs requested is less than the MAX_RSS_IFACES limit.
3535 */
3536 if (num_vfs >= MAX_RSS_IFACES)
3537 num_vf_qs = 1;
3538 }
3539 return num_vf_qs;
3540}
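/* Editorial example (numbers hypothetical): with res.max_rss_qs = 32 and
 * be_max_vfs() = 32,
 *	num_vfs = 8:  8 < (32 - 8), so num_vf_qs = (32 - 8) / 8 = 3
 *	num_vfs = 30: 30 >= 24,     so num_vf_qs = 32 / 30 = 1
 * and any request of MAX_RSS_IFACES or more VFs is clamped back to 1.
 */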
3541
b05004ad
SK
3542static int be_clear(struct be_adapter *adapter)
3543{
f2858738
VV
3544 struct pci_dev *pdev = adapter->pdev;
3545 u16 num_vf_qs;
3546
68d7bdcb 3547 be_cancel_worker(adapter);
191eb756 3548
11ac75ed 3549 if (sriov_enabled(adapter))
f9449ab7
SP
3550 be_vf_clear(adapter);
3551
bec84e6b
VV
3552 /* Re-configure FW to distribute resources evenly across the
3553 * max-supported number of VFs, but only when VFs are not already enabled.
3554 */
ace40aff
VV
3555 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3556 !pci_vfs_assigned(pdev)) {
f2858738
VV
3557 num_vf_qs = be_calculate_vf_qs(adapter,
3558 pci_sriov_get_totalvfs(pdev));
bec84e6b 3559 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3560 pci_sriov_get_totalvfs(pdev),
3561 num_vf_qs);
3562 }
bec84e6b 3563
c5abe7c0 3564#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3565 be_disable_vxlan_offloads(adapter);
c5abe7c0 3566#endif
2d17f403 3567 /* Delete the primary MAC along with the uc-mac list */
b05004ad 3568 be_mac_clear(adapter);
fbc13f01 3569
f9449ab7 3570 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3571
7707133c 3572 be_clear_queues(adapter);
a54769f5 3573
10ef9ab4 3574 be_msix_disable(adapter);
e1ad8e33 3575 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3576 return 0;
3577}
3578
0700d816
KA
3579static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3580 u32 cap_flags, u32 vf)
3581{
3582 u32 en_flags;
0700d816
KA
3583
3584 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3585 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
71bb8bd0 3586 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
0700d816
KA
3587
3588 en_flags &= cap_flags;
3589
435452aa 3590 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
0700d816
KA
3591}
3592
4c876616 3593static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3594{
92bf14ab 3595 struct be_resources res = {0};
4c876616 3596 struct be_vf_cfg *vf_cfg;
0700d816
KA
3597 u32 cap_flags, vf;
3598 int status;
abb93951 3599
0700d816 3600 /* If a FW profile exists, then cap_flags are updated */
4c876616
SP
3601 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3602 BE_IF_FLAGS_MULTICAST;
abb93951 3603
4c876616 3604 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3605 if (!BE3_chip(adapter)) {
3606 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3607 RESOURCE_LIMITS,
92bf14ab 3608 vf + 1);
435452aa 3609 if (!status) {
92bf14ab 3610 cap_flags = res.if_cap_flags;
435452aa
VV
3611 /* Prevent VFs from enabling VLAN promiscuous
3612 * mode
3613 */
3614 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3615 }
92bf14ab 3616 }
4c876616 3617
0700d816
KA
3618 status = be_if_create(adapter, &vf_cfg->if_handle,
3619 cap_flags, vf + 1);
4c876616 3620 if (status)
0700d816 3621 return status;
4c876616 3622 }
0700d816
KA
3623
3624 return 0;
abb93951
PR
3625}
3626
39f1d94d 3627static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3628{
11ac75ed 3629 struct be_vf_cfg *vf_cfg;
30128031
SP
3630 int vf;
3631
39f1d94d
SP
3632 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3633 GFP_KERNEL);
3634 if (!adapter->vf_cfg)
3635 return -ENOMEM;
3636
11ac75ed
SP
3637 for_all_vfs(adapter, vf_cfg, vf) {
3638 vf_cfg->if_handle = -1;
3639 vf_cfg->pmac_id = -1;
30128031 3640 }
39f1d94d 3641 return 0;
30128031
SP
3642}
3643
f9449ab7
SP
3644static int be_vf_setup(struct be_adapter *adapter)
3645{
c502224e 3646 struct device *dev = &adapter->pdev->dev;
11ac75ed 3647 struct be_vf_cfg *vf_cfg;
4c876616 3648 int status, old_vfs, vf;
e7bcbd7b 3649 bool spoofchk;
39f1d94d 3650
257a3feb 3651 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3652
3653 status = be_vf_setup_init(adapter);
3654 if (status)
3655 goto err;
30128031 3656
4c876616
SP
3657 if (old_vfs) {
3658 for_all_vfs(adapter, vf_cfg, vf) {
3659 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3660 if (status)
3661 goto err;
3662 }
f9449ab7 3663
4c876616
SP
3664 status = be_vfs_mac_query(adapter);
3665 if (status)
3666 goto err;
3667 } else {
bec84e6b
VV
3668 status = be_vfs_if_create(adapter);
3669 if (status)
3670 goto err;
3671
39f1d94d
SP
3672 status = be_vf_eth_addr_config(adapter);
3673 if (status)
3674 goto err;
3675 }
f9449ab7 3676
11ac75ed 3677 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3678 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3679 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3680 vf + 1);
3681 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3682 status = be_cmd_set_fn_privileges(adapter,
435452aa 3683 vf_cfg->privileges |
04a06028
SP
3684 BE_PRIV_FILTMGMT,
3685 vf + 1);
435452aa
VV
3686 if (!status) {
3687 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3688 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3689 vf);
435452aa 3690 }
04a06028
SP
3691 }
3692
0f77ba73
RN
3693 /* Allow full available bandwidth */
3694 if (!old_vfs)
3695 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3696
e7bcbd7b
KA
3697 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3698 vf_cfg->if_handle, NULL,
3699 &spoofchk);
3700 if (!status)
3701 vf_cfg->spoofchk = spoofchk;
3702
bdce2ad7 3703 if (!old_vfs) {
0599863d 3704 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3705 be_cmd_set_logical_link_config(adapter,
3706 IFLA_VF_LINK_STATE_AUTO,
3707 vf+1);
3708 }
f9449ab7 3709 }
b4c1df93
SP
3710
3711 if (!old_vfs) {
3712 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3713 if (status) {
3714 dev_err(dev, "SRIOV enable failed\n");
3715 adapter->num_vfs = 0;
3716 goto err;
3717 }
3718 }
f174c7ec
VV
3719
3720 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3721 return 0;
3722err:
4c876616
SP
3723 dev_err(dev, "VF setup failed\n");
3724 be_vf_clear(adapter);
f9449ab7
SP
3725 return status;
3726}
3727
f93f160b
VV
3728/* Converting function_mode bits on BE3 to SH mc_type enums */
3729
3730static u8 be_convert_mc_type(u32 function_mode)
3731{
66064dbc 3732 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3733 return vNIC1;
66064dbc 3734 else if (function_mode & QNQ_MODE)
f93f160b
VV
3735 return FLEX10;
3736 else if (function_mode & VNIC_MODE)
3737 return vNIC2;
3738 else if (function_mode & UMC_ENABLED)
3739 return UMC;
3740 else
3741 return MC_NONE;
3742}
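/* Editorial summary of the mapping above:
 *	VNIC_MODE + QNQ_MODE -> vNIC1
 *	QNQ_MODE only        -> FLEX10
 *	VNIC_MODE only       -> vNIC2
 *	UMC_ENABLED          -> UMC
 *	none of the above    -> MC_NONE (not multi-channel)
 */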
3743
92bf14ab
SP
3744 /* On BE2/BE3, the FW does not advertise the supported resource limits */
3745static void BEx_get_resources(struct be_adapter *adapter,
3746 struct be_resources *res)
3747{
bec84e6b 3748 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3749
3750 if (be_physfn(adapter))
3751 res->max_uc_mac = BE_UC_PMAC_COUNT;
3752 else
3753 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3754
f93f160b
VV
3755 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3756
3757 if (be_is_mc(adapter)) {
3758 /* Assume that there are 4 channels per port
3759 * when multi-channel is enabled
3760 */
3761 if (be_is_qnq_mode(adapter))
3762 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3763 else
3764 /* In a non-qnq multichannel mode, the pvid
3765 * takes up one vlan entry
3766 */
3767 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3768 } else {
92bf14ab 3769 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3770 }
3771
92bf14ab
SP
3772 res->max_mcast_mac = BE_MAX_MC;
3773
a5243dab
VV
3774 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3775 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3776 * *only* if it is RSS-capable.
3777 */
3778 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3779 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3780 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3781 res->max_tx_qs = 1;
a28277dc
SR
3782 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3783 struct be_resources super_nic_res = {0};
3784
3785 /* On a SuperNIC profile, the driver needs to use the
3786 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3787 */
f2858738
VV
3788 be_cmd_get_profile_config(adapter, &super_nic_res,
3789 RESOURCE_LIMITS, 0);
a28277dc
SR
3790 /* Some old versions of BE3 FW don't report max_tx_qs value */
3791 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3792 } else {
92bf14ab 3793 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3794 }
92bf14ab
SP
3795
3796 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3797 !use_sriov && be_physfn(adapter))
3798 res->max_rss_qs = (adapter->be3_native) ?
3799 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3800 res->max_rx_qs = res->max_rss_qs + 1;
3801
e3dc867c 3802 if (be_physfn(adapter))
d3518e21 3803 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3804 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3805 else
3806 res->max_evt_qs = 1;
92bf14ab
SP
3807
3808 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 3809 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
3810 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3811 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3812}
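/* Editorial example (assuming BE_NUM_VLANS_SUPPORTED is 64): a QnQ
 * multi-channel function gets 64 / 8 = 8 VLAN filters, a non-QnQ
 * multi-channel function gets 64 / 4 - 1 = 15 (one entry is consumed
 * by the pvid), and a single-channel function keeps all 64.
 */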
3813
30128031
SP
3814static void be_setup_init(struct be_adapter *adapter)
3815{
3816 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3817 adapter->phy.link_speed = -1;
30128031
SP
3818 adapter->if_handle = -1;
3819 adapter->be3_native = false;
f66b7cfd 3820 adapter->if_flags = 0;
f25b119c
PR
3821 if (be_physfn(adapter))
3822 adapter->cmd_privileges = MAX_PRIVILEGES;
3823 else
3824 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3825}
3826
bec84e6b
VV
3827static int be_get_sriov_config(struct be_adapter *adapter)
3828{
bec84e6b 3829 struct be_resources res = {0};
d3d18312 3830 int max_vfs, old_vfs;
bec84e6b 3831
f2858738 3832 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 3833
ace40aff 3834 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
3835 if (BE3_chip(adapter) && !res.max_vfs) {
3836 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3837 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3838 }
3839
d3d18312 3840 adapter->pool_res = res;
bec84e6b 3841
ace40aff
VV
3842 /* If the VFs were not disabled during a previous unload of the driver,
3843 * we cannot rely on the PF-pool limits for the TotalVFs value.
3844 * Instead, use the TotalVFs value stored in the pci-dev struct.
3845 */
bec84e6b
VV
3846 old_vfs = pci_num_vf(adapter->pdev);
3847 if (old_vfs) {
ace40aff
VV
3848 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3849 old_vfs);
3850
3851 adapter->pool_res.max_vfs =
3852 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 3853 adapter->num_vfs = old_vfs;
bec84e6b
VV
3854 }
3855
3856 return 0;
3857}
3858
ace40aff
VV
3859static void be_alloc_sriov_res(struct be_adapter *adapter)
3860{
3861 int old_vfs = pci_num_vf(adapter->pdev);
3862 u16 num_vf_qs;
3863 int status;
3864
3865 be_get_sriov_config(adapter);
3866
3867 if (!old_vfs)
3868 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3869
3870 /* When the HW is in an SRIOV-capable configuration, the PF-pool
3871 * resources are given to the PF during driver load, if there are no
3872 * old VFs. This facility is not available in BE3 FW.
3873 * On the Lancer chip, the FW does this itself.
3874 */
3875 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3876 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3877 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3878 num_vf_qs);
3879 if (status)
3880 dev_err(&adapter->pdev->dev,
3881 "Failed to optimize SRIOV resources\n");
3882 }
3883}
3884
92bf14ab 3885static int be_get_resources(struct be_adapter *adapter)
abb93951 3886{
92bf14ab
SP
3887 struct device *dev = &adapter->pdev->dev;
3888 struct be_resources res = {0};
3889 int status;
abb93951 3890
92bf14ab
SP
3891 if (BEx_chip(adapter)) {
3892 BEx_get_resources(adapter, &res);
3893 adapter->res = res;
abb93951
PR
3894 }
3895
92bf14ab
SP
3896 /* For Lancer, SH etc., read per-function resource limits from the FW.
3897 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3898 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3899 */
3900 if (!BEx_chip(adapter)) {
3901 status = be_cmd_get_func_config(adapter, &res);
3902 if (status)
3903 return status;
abb93951 3904
71bb8bd0
VV
3905 /* If a default RXQ must be created, we'll use up one RSSQ */
3906 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3907 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3908 res.max_rss_qs -= 1;
3909
92bf14ab
SP
3910 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3911 if (be_roce_supported(adapter))
3912 res.max_evt_qs /= 2;
3913 adapter->res = res;
abb93951 3914 }
4c876616 3915
71bb8bd0
VV
3916 /* If the FW supports an RSS default queue, skip creating a non-RSS
3917 * queue for non-IP traffic.
3918 */
3919 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3920 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3921
acbafeb1
SP
3922 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3923 be_max_txqs(adapter), be_max_rxqs(adapter),
3924 be_max_rss(adapter), be_max_eqs(adapter),
3925 be_max_vfs(adapter));
3926 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3927 be_max_uc(adapter), be_max_mc(adapter),
3928 be_max_vlans(adapter));
3929
ace40aff
VV
3930 /* Sanitize cfg_num_qs based on HW and platform limits */
3931 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
3932 be_max_qs(adapter));
92bf14ab 3933 return 0;
abb93951
PR
3934}
3935
39f1d94d
SP
3936static int be_get_config(struct be_adapter *adapter)
3937{
6b085ba9 3938 int status, level;
542963b7 3939 u16 profile_id;
6b085ba9
SP
3940
3941 status = be_cmd_get_cntl_attributes(adapter);
3942 if (status)
3943 return status;
39f1d94d 3944
e97e3cda 3945 status = be_cmd_query_fw_cfg(adapter);
abb93951 3946 if (status)
92bf14ab 3947 return status;
abb93951 3948
6b085ba9
SP
3949 if (BEx_chip(adapter)) {
3950 level = be_cmd_get_fw_log_level(adapter);
3951 adapter->msg_enable =
3952 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3953 }
3954
3955 be_cmd_get_acpi_wol_cap(adapter);
3956
21252377
VV
3957 be_cmd_query_port_name(adapter);
3958
3959 if (be_physfn(adapter)) {
542963b7
VV
3960 status = be_cmd_get_active_profile(adapter, &profile_id);
3961 if (!status)
3962 dev_info(&adapter->pdev->dev,
3963 "Using profile 0x%x\n", profile_id);
962bcb75 3964 }
bec84e6b 3965
92bf14ab
SP
3966 status = be_get_resources(adapter);
3967 if (status)
3968 return status;
abb93951 3969
46ee9c14
RN
3970 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3971 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3972 if (!adapter->pmac_id)
3973 return -ENOMEM;
abb93951 3974
92bf14ab 3975 return 0;
39f1d94d
SP
3976}
3977
95046b92
SP
3978static int be_mac_setup(struct be_adapter *adapter)
3979{
3980 u8 mac[ETH_ALEN];
3981 int status;
3982
3983 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3984 status = be_cmd_get_perm_mac(adapter, mac);
3985 if (status)
3986 return status;
3987
3988 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3989 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3990 } else {
3991 /* Maybe the HW was reset; dev_addr must be re-programmed */
3992 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3993 }
3994
2c7a9dc1
AK
3995 /* For BE3-R VFs, the PF programs the initial MAC address */
3996 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3997 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3998 &adapter->pmac_id[0], 0);
95046b92
SP
3999 return 0;
4000}
4001
68d7bdcb
SP
4002static void be_schedule_worker(struct be_adapter *adapter)
4003{
4004 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4005 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4006}
4007
eb7dd46c
SP
4008static void be_schedule_err_detection(struct be_adapter *adapter)
4009{
4010 schedule_delayed_work(&adapter->be_err_detection_work,
4011 msecs_to_jiffies(1000));
4012 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4013}
4014
7707133c 4015static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4016{
68d7bdcb 4017 struct net_device *netdev = adapter->netdev;
10ef9ab4 4018 int status;
ba343c77 4019
7707133c 4020 status = be_evt_queues_create(adapter);
abb93951
PR
4021 if (status)
4022 goto err;
73d540f2 4023
7707133c 4024 status = be_tx_qs_create(adapter);
c2bba3df
SK
4025 if (status)
4026 goto err;
10ef9ab4 4027
7707133c 4028 status = be_rx_cqs_create(adapter);
10ef9ab4 4029 if (status)
a54769f5 4030 goto err;
6b7c5b94 4031
7707133c 4032 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4033 if (status)
4034 goto err;
4035
68d7bdcb
SP
4036 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4037 if (status)
4038 goto err;
4039
4040 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4041 if (status)
4042 goto err;
4043
7707133c
SP
4044 return 0;
4045err:
4046 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4047 return status;
4048}
4049
68d7bdcb
SP
4050int be_update_queues(struct be_adapter *adapter)
4051{
4052 struct net_device *netdev = adapter->netdev;
4053 int status;
4054
4055 if (netif_running(netdev))
4056 be_close(netdev);
4057
4058 be_cancel_worker(adapter);
4059
4060 /* If any vectors have been shared with RoCE, we cannot re-program
4061 * the MSIx table.
4062 */
4063 if (!adapter->num_msix_roce_vec)
4064 be_msix_disable(adapter);
4065
4066 be_clear_queues(adapter);
4067
4068 if (!msix_enabled(adapter)) {
4069 status = be_msix_enable(adapter);
4070 if (status)
4071 return status;
4072 }
4073
4074 status = be_setup_queues(adapter);
4075 if (status)
4076 return status;
4077
4078 be_schedule_worker(adapter);
4079
4080 if (netif_running(netdev))
4081 status = be_open(netdev);
4082
4083 return status;
4084}
4085
f7062ee5
SP
4086static inline int fw_major_num(const char *fw_ver)
4087{
4088 int fw_major = 0, i;
4089
4090 i = sscanf(fw_ver, "%d.", &fw_major);
4091 if (i != 1)
4092 return 0;
4093
4094 return fw_major;
4095}
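/* Editorial example: fw_major_num("11.0.219.0") parses the leading
 * integer and returns 11; a version string that sscanf() cannot parse
 * yields 0. be_setup() uses this to warn when a BE2 runs pre-4.0 FW.
 */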
4096
f962f840
SP
4097 /* If any VFs are already enabled, don't FLR the PF */
4098static bool be_reset_required(struct be_adapter *adapter)
4099{
4100 return pci_num_vf(adapter->pdev) ? false : true;
4101}
4102
4103/* Wait for the FW to be ready and perform the required initialization */
4104static int be_func_init(struct be_adapter *adapter)
4105{
4106 int status;
4107
4108 status = be_fw_wait_ready(adapter);
4109 if (status)
4110 return status;
4111
4112 if (be_reset_required(adapter)) {
4113 status = be_cmd_reset_function(adapter);
4114 if (status)
4115 return status;
4116
4117 /* Wait for interrupts to quiesce after an FLR */
4118 msleep(100);
4119
4120 /* We can clear all errors when function reset succeeds */
4121 be_clear_all_error(adapter);
4122 }
4123
4124 /* Tell FW we're ready to fire cmds */
4125 status = be_cmd_fw_init(adapter);
4126 if (status)
4127 return status;
4128
4129 /* Allow interrupts for other ULPs running on NIC function */
4130 be_intr_set(adapter, true);
4131
4132 return 0;
4133}
4134
7707133c
SP
4135static int be_setup(struct be_adapter *adapter)
4136{
4137 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4138 int status;
4139
f962f840
SP
4140 status = be_func_init(adapter);
4141 if (status)
4142 return status;
4143
7707133c
SP
4144 be_setup_init(adapter);
4145
4146 if (!lancer_chip(adapter))
4147 be_cmd_req_native_mode(adapter);
4148
ace40aff
VV
4149 if (!BE2_chip(adapter) && be_physfn(adapter))
4150 be_alloc_sriov_res(adapter);
4151
7707133c 4152 status = be_get_config(adapter);
10ef9ab4 4153 if (status)
a54769f5 4154 goto err;
6b7c5b94 4155
7707133c 4156 status = be_msix_enable(adapter);
10ef9ab4 4157 if (status)
a54769f5 4158 goto err;
6b7c5b94 4159
0700d816
KA
4160 status = be_if_create(adapter, &adapter->if_handle,
4161 be_if_cap_flags(adapter), 0);
7707133c 4162 if (status)
a54769f5 4163 goto err;
6b7c5b94 4164
68d7bdcb
SP
4165 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4166 rtnl_lock();
7707133c 4167 status = be_setup_queues(adapter);
68d7bdcb 4168 rtnl_unlock();
95046b92 4169 if (status)
1578e777
PR
4170 goto err;
4171
7707133c 4172 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4173
4174 status = be_mac_setup(adapter);
10ef9ab4
SP
4175 if (status)
4176 goto err;
4177
e97e3cda 4178 be_cmd_get_fw_ver(adapter);
acbafeb1 4179 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4180
e9e2a904 4181 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4182 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4183 adapter->fw_ver);
4184 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4185 }
4186
1d1e9a46 4187 if (adapter->vlans_added)
10329df8 4188 be_vid_config(adapter);
7ab8b0b4 4189
a54769f5 4190 be_set_rx_mode(adapter->netdev);
5fb379ee 4191
00d594c3
KA
4192 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4193 adapter->rx_fc);
4194 if (status)
4195 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4196 &adapter->rx_fc);
590c391d 4197
00d594c3
KA
4198 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4199 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4200
bdce2ad7
SR
4201 if (be_physfn(adapter))
4202 be_cmd_set_logical_link_config(adapter,
4203 IFLA_VF_LINK_STATE_AUTO, 0);
4204
bec84e6b
VV
4205 if (adapter->num_vfs)
4206 be_vf_setup(adapter);
f9449ab7 4207
f25b119c
PR
4208 status = be_cmd_get_phy_info(adapter);
4209 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4210 adapter->phy.fc_autoneg = 1;
4211
68d7bdcb 4212 be_schedule_worker(adapter);
e1ad8e33 4213 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4214 return 0;
a54769f5
SP
4215err:
4216 be_clear(adapter);
4217 return status;
4218}
6b7c5b94 4219
66268739
IV
4220#ifdef CONFIG_NET_POLL_CONTROLLER
4221static void be_netpoll(struct net_device *netdev)
4222{
4223 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4224 struct be_eq_obj *eqo;
66268739
IV
4225 int i;
4226
e49cc34f
SP
4227 for_all_evt_queues(adapter, eqo, i) {
4228 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
4229 napi_schedule(&eqo->napi);
4230 }
66268739
IV
4231}
4232#endif
4233
96c9b2e4 4234static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4235
306f1348
SP
4236static bool phy_flashing_required(struct be_adapter *adapter)
4237{
e02cfd96 4238 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4239 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4240}
4241
c165541e
PR
4242static bool is_comp_in_ufi(struct be_adapter *adapter,
4243 struct flash_section_info *fsec, int type)
4244{
4245 int i = 0, img_type = 0;
4246 struct flash_section_info_g2 *fsec_g2 = NULL;
4247
ca34fe38 4248 if (BE2_chip(adapter))
c165541e
PR
4249 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4250
4251 for (i = 0; i < MAX_FLASH_COMP; i++) {
4252 if (fsec_g2)
4253 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4254 else
4255 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4256
4257 if (img_type == type)
4258 return true;
4259 }
4260 return false;
4261
4262}
4263
4188e7df 4264static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4265 int header_size,
4266 const struct firmware *fw)
c165541e
PR
4267{
4268 struct flash_section_info *fsec = NULL;
4269 const u8 *p = fw->data;
4270
4271 p += header_size;
4272 while (p < (fw->data + fw->size)) {
4273 fsec = (struct flash_section_info *)p;
4274 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4275 return fsec;
4276 p += 32;
4277 }
4278 return NULL;
4279}
4280
96c9b2e4
VV
4281static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4282 u32 img_offset, u32 img_size, int hdr_size,
4283 u16 img_optype, bool *crc_match)
4284{
4285 u32 crc_offset;
4286 int status;
4287 u8 crc[4];
4288
70a7b525
VV
4289 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4290 img_size - 4);
96c9b2e4
VV
4291 if (status)
4292 return status;
4293
4294 crc_offset = hdr_size + img_offset + img_size - 4;
4295
4296 /* Skip flashing if the CRC of the flashed region matches */
4297 if (!memcmp(crc, p + crc_offset, 4))
4298 *crc_match = true;
4299 else
4300 *crc_match = false;
4301
4302 return status;
4303}
4304
773a2d7c 4305static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4306 struct be_dma_mem *flash_cmd, int optype, int img_size,
4307 u32 img_offset)
773a2d7c 4308{
70a7b525 4309 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4310 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4311 int status;
773a2d7c 4312
773a2d7c
PR
4313 while (total_bytes) {
4314 num_bytes = min_t(u32, 32*1024, total_bytes);
4315
4316 total_bytes -= num_bytes;
4317
4318 if (!total_bytes) {
4319 if (optype == OPTYPE_PHY_FW)
4320 flash_op = FLASHROM_OPER_PHY_FLASH;
4321 else
4322 flash_op = FLASHROM_OPER_FLASH;
4323 } else {
4324 if (optype == OPTYPE_PHY_FW)
4325 flash_op = FLASHROM_OPER_PHY_SAVE;
4326 else
4327 flash_op = FLASHROM_OPER_SAVE;
4328 }
4329
be716446 4330 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4331 img += num_bytes;
4332 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4333 flash_op, img_offset +
4334 bytes_sent, num_bytes);
4c60005f 4335 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4336 optype == OPTYPE_PHY_FW)
4337 break;
4338 else if (status)
773a2d7c 4339 return status;
70a7b525
VV
4340
4341 bytes_sent += num_bytes;
773a2d7c
PR
4342 }
4343 return 0;
4344}
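/* Editorial example (size hypothetical): flashing a 100KB image in 32KB
 * chunks issues three _SAVE ops (32KB each) followed by one _FLASH op
 * for the final 4KB; only the last chunk uses FLASHROM_OPER_FLASH (or
 * FLASHROM_OPER_PHY_FLASH), committing what the earlier ops staged.
 */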
4345
0ad3157e 4346/* For BE2, BE3 and BE3-R */
ca34fe38 4347static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4348 const struct firmware *fw,
4349 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4350{
c165541e 4351 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4352 struct device *dev = &adapter->pdev->dev;
c165541e 4353 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4354 int status, i, filehdr_size, num_comp;
4355 const struct flash_comp *pflashcomp;
4356 bool crc_match;
4357 const u8 *p;
c165541e
PR
4358
4359 struct flash_comp gen3_flash_types[] = {
4360 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4361 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4362 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4363 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4364 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4365 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4366 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4367 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4368 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4369 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4370 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4371 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4372 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4373 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4374 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4375 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4376 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4377 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4378 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4379 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4380 };
c165541e
PR
4381
4382 struct flash_comp gen2_flash_types[] = {
4383 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4384 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4385 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4386 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4387 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4388 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4389 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4390 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4391 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4392 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4393 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4394 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4395 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4396 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4397 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4398 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4399 };
4400
ca34fe38 4401 if (BE3_chip(adapter)) {
3f0d4560
AK
4402 pflashcomp = gen3_flash_types;
4403 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4404 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4405 } else {
4406 pflashcomp = gen2_flash_types;
4407 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4408 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4409 img_hdrs_size = 0;
84517482 4410 }
ca34fe38 4411
c165541e
PR
4412 /* Get flash section info */
4413 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4414 if (!fsec) {
96c9b2e4 4415 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4416 return -1;
4417 }
9fe96934 4418 for (i = 0; i < num_comp; i++) {
c165541e 4419 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4420 continue;
c165541e
PR
4421
4422 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4423 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4424 continue;
4425
773a2d7c
PR
4426 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4427 !phy_flashing_required(adapter))
306f1348 4428 continue;
c165541e 4429
773a2d7c 4430 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4431 status = be_check_flash_crc(adapter, fw->data,
4432 pflashcomp[i].offset,
4433 pflashcomp[i].size,
4434 filehdr_size +
4435 img_hdrs_size,
4436 OPTYPE_REDBOOT, &crc_match);
4437 if (status) {
4438 dev_err(dev,
4439 "Could not get CRC for 0x%x region\n",
4440 pflashcomp[i].optype);
4441 continue;
4442 }
4443
4444 if (crc_match)
773a2d7c
PR
4445 continue;
4446 }
c165541e 4447
96c9b2e4
VV
4448 p = fw->data + filehdr_size + pflashcomp[i].offset +
4449 img_hdrs_size;
306f1348
SP
4450 if (p + pflashcomp[i].size > fw->data + fw->size)
4451 return -1;
773a2d7c
PR
4452
4453 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4454 pflashcomp[i].size, 0);
773a2d7c 4455 if (status) {
96c9b2e4 4456 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4457 pflashcomp[i].img_type);
4458 return status;
84517482 4459 }
84517482 4460 }
84517482
AK
4461 return 0;
4462}
4463
96c9b2e4
VV
4464static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4465{
4466 u32 img_type = le32_to_cpu(fsec_entry.type);
4467 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4468
4469 if (img_optype != 0xFFFF)
4470 return img_optype;
4471
4472 switch (img_type) {
4473 case IMAGE_FIRMWARE_iSCSI:
4474 img_optype = OPTYPE_ISCSI_ACTIVE;
4475 break;
4476 case IMAGE_BOOT_CODE:
4477 img_optype = OPTYPE_REDBOOT;
4478 break;
4479 case IMAGE_OPTION_ROM_ISCSI:
4480 img_optype = OPTYPE_BIOS;
4481 break;
4482 case IMAGE_OPTION_ROM_PXE:
4483 img_optype = OPTYPE_PXE_BIOS;
4484 break;
4485 case IMAGE_OPTION_ROM_FCoE:
4486 img_optype = OPTYPE_FCOE_BIOS;
4487 break;
4488 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4489 img_optype = OPTYPE_ISCSI_BACKUP;
4490 break;
4491 case IMAGE_NCSI:
4492 img_optype = OPTYPE_NCSI_FW;
4493 break;
4494 case IMAGE_FLASHISM_JUMPVECTOR:
4495 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4496 break;
4497 case IMAGE_FIRMWARE_PHY:
4498 img_optype = OPTYPE_SH_PHY_FW;
4499 break;
4500 case IMAGE_REDBOOT_DIR:
4501 img_optype = OPTYPE_REDBOOT_DIR;
4502 break;
4503 case IMAGE_REDBOOT_CONFIG:
4504 img_optype = OPTYPE_REDBOOT_CONFIG;
4505 break;
4506 case IMAGE_UFI_DIR:
4507 img_optype = OPTYPE_UFI_DIR;
4508 break;
4509 default:
4510 break;
4511 }
4512
4513 return img_optype;
4514}
4515
773a2d7c 4516static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4517 const struct firmware *fw,
4518 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4519{
773a2d7c 4520 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4521 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4522 struct device *dev = &adapter->pdev->dev;
773a2d7c 4523 struct flash_section_info *fsec = NULL;
96c9b2e4 4524 u32 img_offset, img_size, img_type;
70a7b525 4525 u16 img_optype, flash_optype;
96c9b2e4 4526 int status, i, filehdr_size;
96c9b2e4 4527 const u8 *p;
773a2d7c
PR
4528
4529 filehdr_size = sizeof(struct flash_file_hdr_g3);
4530 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4531 if (!fsec) {
96c9b2e4 4532 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4533 return -EINVAL;
773a2d7c
PR
4534 }
4535
70a7b525 4536retry_flash:
773a2d7c
PR
4537 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4538 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4539 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4540 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4541 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4542 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4543
96c9b2e4 4544 if (img_optype == 0xFFFF)
773a2d7c 4545 continue;
70a7b525
VV
4546
4547 if (flash_offset_support)
4548 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4549 else
4550 flash_optype = img_optype;
4551
96c9b2e4
VV
4552 /* Don't bother verifying CRC if an old FW image is being
4553 * flashed
4554 */
4555 if (old_fw_img)
4556 goto flash;
4557
4558 status = be_check_flash_crc(adapter, fw->data, img_offset,
4559 img_size, filehdr_size +
70a7b525 4560 img_hdrs_size, flash_optype,
96c9b2e4 4561 &crc_match);
4c60005f
KA
4562 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4563 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4564 /* The current FW image on the card does not support
4565 * OFFSET based flashing. Retry using older mechanism
4566 * of OPTYPE based flashing
4567 */
4568 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4569 flash_offset_support = false;
4570 goto retry_flash;
4571 }
4572
4573 /* The current FW image on the card does not recognize
4574 * the new FLASH op_type. The FW download is partially
4575 * complete. Reboot the server now to enable FW image
4576 * to recognize the new FLASH op_type. To complete the
4577 * remaining process, download the same FW again after
4578 * the reboot.
4579 */
96c9b2e4
VV
4580 dev_err(dev, "Flash incomplete. Reset the server\n");
4581 dev_err(dev, "Download FW image again after reset\n");
4582 return -EAGAIN;
4583 } else if (status) {
4584 dev_err(dev, "Could not get CRC for 0x%x region\n",
4585 img_optype);
4586 return -EFAULT;
773a2d7c
PR
4587 }
4588
96c9b2e4
VV
4589 if (crc_match)
4590 continue;
773a2d7c 4591
96c9b2e4
VV
4592flash:
4593 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4594 if (p + img_size > fw->data + fw->size)
4595 return -1;
4596
70a7b525
VV
4597 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4598 img_offset);
4599
4600 /* The current FW image on the card does not support OFFSET
4601 * based flashing. Retry using older mechanism of OPTYPE based
4602 * flashing
4603 */
4604 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4605 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4606 flash_offset_support = false;
4607 goto retry_flash;
4608 }
4609
96c9b2e4
VV
4610 /* For old FW images, ignore ILLEGAL_FIELD errors or errors on
4611 * the UFI_DIR region
4612 */
4c60005f
KA
4613 if (old_fw_img &&
4614 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4615 (img_optype == OPTYPE_UFI_DIR &&
4616 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4617 continue;
4618 } else if (status) {
4619 dev_err(dev, "Flashing section type 0x%x failed\n",
4620 img_type);
4621 return -EFAULT;
773a2d7c
PR
4622 }
4623 }
4624 return 0;
3f0d4560
AK
4625}
4626
485bf569 4627static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4628 const struct firmware *fw)
84517482 4629{
485bf569
SN
4630#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4631#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4632 struct device *dev = &adapter->pdev->dev;
84517482 4633 struct be_dma_mem flash_cmd;
485bf569
SN
4634 const u8 *data_ptr = NULL;
4635 u8 *dest_image_ptr = NULL;
4636 size_t image_size = 0;
4637 u32 chunk_size = 0;
4638 u32 data_written = 0;
4639 u32 offset = 0;
4640 int status = 0;
4641 u8 add_status = 0;
f67ef7ba 4642 u8 change_status;
84517482 4643
485bf569 4644 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4645 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4646 return -EINVAL;
d9efd2af
SB
4647 }
4648
485bf569
SN
4649 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4650 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4651 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4652 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4653 if (!flash_cmd.va)
4654 return -ENOMEM;
84517482 4655
485bf569
SN
4656 dest_image_ptr = flash_cmd.va +
4657 sizeof(struct lancer_cmd_req_write_object);
4658 image_size = fw->size;
4659 data_ptr = fw->data;
4660
4661 while (image_size) {
4662 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4663
4664 /* Copy the image chunk content. */
4665 memcpy(dest_image_ptr, data_ptr, chunk_size);
4666
4667 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4668 chunk_size, offset,
4669 LANCER_FW_DOWNLOAD_LOCATION,
4670 &data_written, &change_status,
4671 &add_status);
485bf569
SN
4672 if (status)
4673 break;
4674
4675 offset += data_written;
4676 data_ptr += data_written;
4677 image_size -= data_written;
4678 }
4679
4680 if (!status) {
4681 /* Commit the FW written */
4682 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4683 0, offset,
4684 LANCER_FW_DOWNLOAD_LOCATION,
4685 &data_written, &change_status,
4686 &add_status);
485bf569
SN
4687 }
4688
bb864e07 4689 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4690 if (status) {
bb864e07 4691 dev_err(dev, "Firmware load error\n");
3fb8cb80 4692 return be_cmd_status(status);
485bf569
SN
4693 }
4694
bb864e07
KA
4695 dev_info(dev, "Firmware flashed successfully\n");
4696
f67ef7ba 4697 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4698 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4699 status = lancer_physdev_ctrl(adapter,
4700 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4701 if (status) {
bb864e07
KA
4702 dev_err(dev, "Adapter busy, could not reset FW\n");
4703 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4704 }
4705 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4706 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4707 }
3fb8cb80
KA
4708
4709 return 0;
485bf569
SN
4710}
4711
5d3acd0d
VV
4712#define BE2_UFI 2
4713#define BE3_UFI 3
4714#define BE3R_UFI 10
4715#define SH_UFI 4
81a9e226 4716#define SH_P2_UFI 11
5d3acd0d 4717
ca34fe38 4718static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4719 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4720{
5d3acd0d
VV
4721 if (!fhdr) {
4722 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4723 return -1;
4724 }
773a2d7c 4725
5d3acd0d
VV
4726 /* First letter of the build version is used to identify
4727 * which chip this image file is meant for.
4728 */
4729 switch (fhdr->build[0]) {
4730 case BLD_STR_UFI_TYPE_SH:
81a9e226
VV
4731 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4732 SH_UFI;
5d3acd0d
VV
4733 case BLD_STR_UFI_TYPE_BE3:
4734 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4735 BE3_UFI;
4736 case BLD_STR_UFI_TYPE_BE2:
4737 return BE2_UFI;
4738 default:
4739 return -1;
4740 }
4741}
773a2d7c 4742
5d3acd0d
VV
4743/* Check if the flash image file is compatible with the adapter that
4744 * is being flashed.
4745 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4746 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
5d3acd0d
VV
4747 */
4748static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4749 struct flash_file_hdr_g3 *fhdr)
4750{
4751 int ufi_type = be_get_ufi_type(adapter, fhdr);
4752
4753 switch (ufi_type) {
81a9e226 4754 case SH_P2_UFI:
5d3acd0d 4755 return skyhawk_chip(adapter);
81a9e226
VV
4756 case SH_UFI:
4757 return (skyhawk_chip(adapter) &&
4758 adapter->asic_rev < ASIC_REV_P2);
5d3acd0d
VV
4759 case BE3R_UFI:
4760 return BE3_chip(adapter);
4761 case BE3_UFI:
4762 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4763 case BE2_UFI:
4764 return BE2_chip(adapter);
4765 default:
4766 return false;
4767 }
773a2d7c
PR
4768}
4769
485bf569
SN
4770static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4771{
5d3acd0d 4772 struct device *dev = &adapter->pdev->dev;
485bf569 4773 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4774 struct image_hdr *img_hdr_ptr;
4775 int status = 0, i, num_imgs;
485bf569 4776 struct be_dma_mem flash_cmd;
84517482 4777
5d3acd0d
VV
4778 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4779 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4780 dev_err(dev, "Flash image is not compatible with adapter\n");
4781 return -EINVAL;
84517482
AK
4782 }
4783
5d3acd0d
VV
4784 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4785 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4786 GFP_KERNEL);
4787 if (!flash_cmd.va)
4788 return -ENOMEM;
773a2d7c 4789
773a2d7c
PR
4790 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4791 for (i = 0; i < num_imgs; i++) {
4792 img_hdr_ptr = (struct image_hdr *)(fw->data +
4793 (sizeof(struct flash_file_hdr_g3) +
4794 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4795 if (!BE2_chip(adapter) &&
4796 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4797 continue;
84517482 4798
5d3acd0d
VV
4799 if (skyhawk_chip(adapter))
4800 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4801 num_imgs);
4802 else
4803 status = be_flash_BEx(adapter, fw, &flash_cmd,
4804 num_imgs);
84517482
AK
4805 }
4806
5d3acd0d
VV
4807 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4808 if (!status)
4809 dev_info(dev, "Firmware flashed successfully\n");
84517482 4810
485bf569
SN
4811 return status;
4812}
4813
4814int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4815{
4816 const struct firmware *fw;
4817 int status;
4818
4819 if (!netif_running(adapter->netdev)) {
4820 dev_err(&adapter->pdev->dev,
4821 "Firmware load not allowed (interface is down)\n");
940a3fcd 4822 return -ENETDOWN;
485bf569
SN
4823 }
4824
4825 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4826 if (status)
4827 goto fw_exit;
4828
4829 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4830
4831 if (lancer_chip(adapter))
4832 status = lancer_fw_download(adapter, fw);
4833 else
4834 status = be_fw_download(adapter, fw);
4835
eeb65ced 4836 if (!status)
e97e3cda 4837 be_cmd_get_fw_ver(adapter);
eeb65ced 4838
84517482
AK
4839fw_exit:
4840 release_firmware(fw);
4841 return status;
4842}
4843
add511b3
RP
4844static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4845 u16 flags)
a77dcb8c
AK
4846{
4847 struct be_adapter *adapter = netdev_priv(dev);
4848 struct nlattr *attr, *br_spec;
4849 int rem;
4850 int status = 0;
4851 u16 mode = 0;
4852
4853 if (!sriov_enabled(adapter))
4854 return -EOPNOTSUPP;
4855
4856 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4857 if (!br_spec)
4858 return -EINVAL;
a77dcb8c
AK
4859
4860 nla_for_each_nested(attr, br_spec, rem) {
4861 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4862 continue;
4863
b7c1a314
TG
4864 if (nla_len(attr) < sizeof(mode))
4865 return -EINVAL;
4866
a77dcb8c
AK
4867 mode = nla_get_u16(attr);
4868 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4869 return -EINVAL;
4870
4871 status = be_cmd_set_hsw_config(adapter, 0, 0,
4872 adapter->if_handle,
4873 mode == BRIDGE_MODE_VEPA ?
4874 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4875 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4876 if (status)
4877 goto err;
4878
4879 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4880 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4881
4882 return status;
4883 }
4884err:
4885 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4886 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4887
4888 return status;
4889}
4890
4891static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4892 struct net_device *dev, u32 filter_mask,
4893 int nlflags)
a77dcb8c
AK
4894{
4895 struct be_adapter *adapter = netdev_priv(dev);
4896 int status = 0;
4897 u8 hsw_mode;
4898
4899 if (!sriov_enabled(adapter))
4900 return 0;
4901
4902 /* BE and Lancer chips support VEB mode only */
4903 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4904 hsw_mode = PORT_FWD_TYPE_VEB;
4905 } else {
4906 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4907 adapter->if_handle, &hsw_mode,
4908 NULL);
a77dcb8c
AK
4909 if (status)
4910 return 0;
4911 }
4912
4913 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4914 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4915 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
46c264da 4916 0, 0, nlflags);
a77dcb8c
AK
4917}
4918
c5abe7c0 4919#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4920/* VxLAN offload Notes:
4921 *
4922 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4923 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4924 * is expected to work across all types of IP tunnels once exported. Skyhawk
4925 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4926 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4927 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4928 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4929 *
4930 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4931 * adds more than one port, disable offloads and don't re-enable them again
4932 * until after all the tunnels are removed.
4933 */
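/* Editorial example of the policy above (port numbers hypothetical):
 * adding UDP port 4789 enables VxLAN offloads; adding a second port,
 * say 8472, disables them and bumps vxlan_port_count to 2; offloads
 * stay off until every port is deleted and a single port is re-added.
 */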
c9c47142
SP
4934static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4935 __be16 port)
4936{
4937 struct be_adapter *adapter = netdev_priv(netdev);
4938 struct device *dev = &adapter->pdev->dev;
4939 int status;
4940
4941 if (lancer_chip(adapter) || BEx_chip(adapter))
4942 return;
4943
4944 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4945 dev_info(dev,
4946 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4947 dev_info(dev, "Disabling VxLAN offloads\n");
4948 adapter->vxlan_port_count++;
4949 goto err;
c9c47142
SP
4950 }
4951
630f4b70
SB
4952 if (adapter->vxlan_port_count++ >= 1)
4953 return;
4954
c9c47142
SP
4955 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4956 OP_CONVERT_NORMAL_TO_TUNNEL);
4957 if (status) {
4958 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4959 goto err;
4960 }
4961
4962 status = be_cmd_set_vxlan_port(adapter, port);
4963 if (status) {
4964 dev_warn(dev, "Failed to add VxLAN port\n");
4965 goto err;
4966 }
4967 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4968 adapter->vxlan_port = port;
4969
630f4b70
SB
4970 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4971 NETIF_F_TSO | NETIF_F_TSO6 |
4972 NETIF_F_GSO_UDP_TUNNEL;
4973 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4974 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4975
c9c47142
SP
4976 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4977 be16_to_cpu(port));
4978 return;
4979err:
4980 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4981}
4982
4983static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4984 __be16 port)
4985{
4986 struct be_adapter *adapter = netdev_priv(netdev);
4987
4988 if (lancer_chip(adapter) || BEx_chip(adapter))
4989 return;
4990
4991 if (adapter->vxlan_port != port)
630f4b70 4992 goto done;
c9c47142
SP
4993
4994 be_disable_vxlan_offloads(adapter);
4995
4996 dev_info(&adapter->pdev->dev,
4997 "Disabled VxLAN offloads for UDP port %d\n",
4998 be16_to_cpu(port));
630f4b70
SB
4999done:
5000 adapter->vxlan_port_count--;
c9c47142 5001}
725d548f 5002
5f35227e
JG
5003static netdev_features_t be_features_check(struct sk_buff *skb,
5004 struct net_device *dev,
5005 netdev_features_t features)
725d548f 5006{
16dde0d6
SB
5007 struct be_adapter *adapter = netdev_priv(dev);
5008 u8 l4_hdr = 0;
5009
5010 /* The code below restricts offload features for some tunneled packets.
5011 * Offload features for normal (non-tunnel) packets are unchanged.
5012 */
5013 if (!skb->encapsulation ||
5014 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5015 return features;
5016
5017 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5018 * should disable tunnel offload features if it's not a VxLAN packet,
5019 * as tunnel offloads have been enabled only for VxLAN. This is done to
5020 * allow other tunneled traffic like GRE to work while VxLAN
5021 * offloads are configured in Skyhawk-R.
5022 */
5023 switch (vlan_get_protocol(skb)) {
5024 case htons(ETH_P_IP):
5025 l4_hdr = ip_hdr(skb)->protocol;
5026 break;
5027 case htons(ETH_P_IPV6):
5028 l4_hdr = ipv6_hdr(skb)->nexthdr;
5029 break;
5030 default:
5031 return features;
5032 }
5033
5034 if (l4_hdr != IPPROTO_UDP ||
5035 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5036 skb->inner_protocol != htons(ETH_P_TEB) ||
5037 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5038 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5039 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5040
5041 return features;
725d548f 5042}
c5abe7c0 5043#endif
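
/* Worked example for the length test in be_features_check() above: in a
 * well-formed VxLAN frame the inner Ethernet header starts exactly
 * sizeof(struct udphdr) + sizeof(struct vxlanhdr) = 8 + 8 = 16 bytes past
 * the outer transport header. A sketch of that test as a standalone
 * predicate (hypothetical helper, not part of this driver):
 */
#if 0
static bool be_is_vxlan_framed(const struct sk_buff *skb)
{
        return skb_inner_mac_header(skb) - skb_transport_header(skb) ==
               sizeof(struct udphdr) + sizeof(struct vxlanhdr);
}
#endif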

static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_rx_mode,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_get_stats64        = be_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
        .ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
#endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
        .ndo_add_vxlan_port     = be_add_vxlan_port,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->flags |= IFF_MULTICAST;

        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

        netdev->netdev_ops = &be_netdev_ops;

        netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        rtnl_lock();
        netif_device_detach(netdev);
        if (netif_running(netdev))
                be_close(netdev);
        rtnl_unlock();

        be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        return status;
        }

        netif_device_attach(netdev);

        return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        status = be_resume(adapter);
        if (status)
                goto err;

        dev_info(dev, "Adapter recovery successful\n");
        return 0;
err:
        if (be_physfn(adapter))
                dev_err(dev, "Adapter recovery failed\n");
        else
                dev_err(dev, "Re-trying adapter recovery\n");

        return status;
}

static void be_err_detection_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter,
                             be_err_detection_work.work);
        int status = 0;

        be_detect_error(adapter);

        if (adapter->hw_error) {
                be_cleanup(adapter);

                /* As of now error recovery support is in Lancer only */
                if (lancer_chip(adapter))
                        status = be_err_recover(adapter);
        }

        /* Always attempt recovery on VFs */
        if (!status || be_virtfn(adapter))
                be_schedule_err_detection(adapter);
}

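/* be_schedule_err_detection() is defined elsewhere in this file; a minimal
 * sketch of the re-arm pattern assumed here (the one-second delay is
 * illustrative):
 */
#if 0
static void be_schedule_err_detection(struct be_adapter *adapter)
{
        schedule_delayed_work(&adapter->be_err_detection_work,
                              msecs_to_jiffies(1000));
}
#endif
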
static void be_log_sfp_info(struct be_adapter *adapter)
{
        int status;

        status = be_cmd_query_sfp_info(adapter);
        if (!status) {
                dev_err(&adapter->pdev->dev,
                        "Unqualified SFP+ detected on %c from %s part no: %s",
                        adapter->port_name, adapter->phy.vendor_name,
                        adapter->phy.vendor_pn);
        }
        adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        if (be_physfn(adapter) &&
            MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                /* Replenish RX-queues starved due to memory
                 * allocation failures.
                 */
                if (rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
        }

        be_eqd_update(adapter);

        if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
                be_log_sfp_info(adapter);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

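/* Timing note: be_worker() re-arms itself every 1000 ms, so with
 * adapter->be_get_temp_freq == 64 (set in be_drv_init() below) the MODULO
 * test above fires the die-temperature query roughly once every 64 seconds,
 * and only on the PF.
 */
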
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
        if (lancer_chip(adapter) || !be_physfn(adapter))
                return 0;
        else
                return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
        if (skyhawk_chip(adapter)) {
                adapter->roce_db.size = 4096;
                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
                                                              db_bar(adapter));
                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
                                                               db_bar(adapter));
        }
        return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u8 __iomem *addr;
        u32 sli_intf;

        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
                                SLI_INTF_FAMILY_SHIFT;
        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(pdev, 2, 0);
                if (!adapter->csr)
                        return -ENOMEM;
        }

        addr = pci_iomap(pdev, db_bar(adapter), 0);
        if (!addr)
                goto pci_map_err;
        adapter->db = addr;

        if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
                if (be_physfn(adapter)) {
                        /* PCICFG is the 2nd BAR in BE2 */
                        addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
                        if (!addr)
                                goto pci_map_err;
                        adapter->pcicfg = addr;
                } else {
                        adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
                }
        }

        be_roce_map_pci_bars(adapter);
        return 0;

pci_map_err:
        dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

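/* BAR layout implied by the mapping code above:
 *   CSR:    BAR 2, mapped only on BEx PFs
 *   DB:     BAR 0 on Lancer and on VFs, BAR 4 otherwise (see db_bar())
 *   PCICFG: BAR 1 on BE2, BAR 0 on BE3/Skyhawk PFs; VFs reach it at
 *           SRIOV_VF_PCICFG_OFFSET inside the DB mapping
 */
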
static void be_drv_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
        struct device *dev = &adapter->pdev->dev;

        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);

        mem = &adapter->rx_filter;
        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);

        mem = &adapter->stats_cmd;
        if (mem->va)
                dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
        struct device *dev = &adapter->pdev->dev;
        int status = 0;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va)
                return -ENOMEM;

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
                                            &rx_filter->dma, GFP_KERNEL);
        if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }

        if (lancer_chip(adapter))
                stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else if (BE3_chip(adapter))
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        else
                stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
        stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
                                            &stats_cmd->dma, GFP_KERNEL);
        if (!stats_cmd->va) {
                status = -ENOMEM;
                goto free_rx_filter;
        }

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);
        init_completion(&adapter->et_cmd_compl);

        pci_save_state(adapter->pdev);

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->be_err_detection_work,
                          be_err_detection_task);

        adapter->rx_fc = true;
        adapter->tx_fc = true;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        return 0;

free_rx_filter:
        dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
        dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
                          mbox_mem_alloc->dma);
        return status;
}

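/* The mailbox setup above over-allocates by 16 bytes so that both the CPU
 * virtual address and the DMA address can be rounded up to a 16-byte
 * boundary with PTR_ALIGN() while still leaving room for the full struct.
 * A sketch of the pattern in isolation (names and 'size' are illustrative):
 */
#if 0
        dma_addr_t raw_dma;
        void *raw = dma_alloc_coherent(dev, size + 16, &raw_dma, GFP_KERNEL);
        void *va_aligned = PTR_ALIGN(raw, 16);
        dma_addr_t dma_aligned = PTR_ALIGN(raw_dma, 16);
#endif
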
static void be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);

        be_cancel_err_detection(adapter);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_unmap_pci_bars(adapter);
        be_drv_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

static char *mc_name(struct be_adapter *adapter)
{
        char *str = ""; /* default */

        switch (adapter->mc_type) {
        case UMC:
                str = "UMC";
                break;
        case FLEX10:
                str = "FLEX10";
                break;
        case vNIC1:
                str = "vNIC-1";
                break;
        case nPAR:
                str = "nPAR";
                break;
        case UFP:
                str = "UFP";
                break;
        case vNIC2:
                str = "vNIC-2";
                break;
        default:
                str = "";
        }

        return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
        return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case OC_DEVICE_ID1:
                return OC_NAME;
        case OC_DEVICE_ID2:
                return OC_NAME_BE;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                return OC_NAME_LANCER;
        case BE_DEVICE_ID2:
                return BE3_NAME;
        case OC_DEVICE_ID5:
        case OC_DEVICE_ID6:
                return OC_NAME_SH;
        default:
                return BE_NAME;
        }
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        struct be_adapter *adapter;
        struct net_device *netdev;
        int status = 0;

        dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = pci_enable_pcie_error_reporting(pdev);
        if (!status)
                dev_info(&pdev->dev, "PCIe error reporting enabled\n");

        status = be_map_pci_bars(adapter);
        if (status)
                goto free_netdev;

        status = be_drv_init(adapter);
        if (status)
                goto unmap_bars;

        status = be_setup(adapter);
        if (status)
                goto drv_cleanup;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        be_schedule_err_detection(adapter);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), adapter->port_name);

        return 0;

unsetup:
        be_clear(adapter);
drv_cleanup:
        be_drv_cleanup(adapter);
unmap_bars:
        be_unmap_pci_bars(adapter);
free_netdev:
        free_netdev(netdev);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (adapter->wol_en)
                be_setup_wol(adapter, true);

        be_intr_set(adapter, false);
        be_cancel_err_detection(adapter);

        be_cleanup(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status = 0;

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        status = be_resume(adapter);
        if (status)
                return status;

        be_schedule_err_detection(adapter);

        if (adapter->wol_en)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_shutdown(adapter);
        cancel_delayed_work_sync(&adapter->work);
        be_cancel_err_detection(adapter);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                be_cancel_err_detection(adapter);

                be_cleanup(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while a flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only on the first function, as the wait is needed only
         * once per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_resume(adapter);
        if (status)
                goto err;

        be_schedule_err_detection(adapter);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        u16 num_vf_qs;
        int status;

        if (!num_vfs)
                be_vf_clear(adapter);

        adapter->num_vfs = num_vfs;

        if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
                dev_warn(&pdev->dev,
                         "Cannot disable VFs while they are assigned\n");
                return -EBUSY;
        }

        /* When the HW is in SRIOV capable configuration, the PF-pool resources
         * are equally distributed across the max-number of VFs. The user may
         * request only a subset of the max-vfs to be enabled.
         * Based on num_vfs, redistribute the resources across num_vfs so that
         * each VF gets access to a larger share of resources.
         * This facility is not available in BE3 FW.
         * Also, this is done by FW in the Lancer chip.
         */
        if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
                num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
                status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
                                                 adapter->num_vfs, num_vf_qs);
                if (status)
                        dev_err(&pdev->dev,
                                "Failed to optimize SR-IOV resources\n");
        }

        status = be_get_resources(adapter);
        if (status)
                return be_cmd_status(status);

        /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
        rtnl_lock();
        status = be_update_queues(adapter);
        rtnl_unlock();
        if (status)
                return be_cmd_status(status);

        if (adapter->num_vfs)
                status = be_vf_setup(adapter);

        if (!status)
                return adapter->num_vfs;

        return 0;
}

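/* This callback backs the standard sriov_numvfs sysfs file; usage sketch
 * (the PCI address below is an example):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 */
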
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

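/* Recovery flow assumed here, per Documentation/PCI/pci-error-recovery.txt:
 * the PCI core calls .error_detected first; when it returns
 * PCI_ERS_RESULT_NEED_RESET the slot is reset and .slot_reset runs; if that
 * returns PCI_ERS_RESULT_RECOVERED the core finally calls .resume.
 */
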
static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_pci_resume,
        .shutdown = be_shutdown,
        .sriov_configure = be_pci_sriov_configure,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        if (num_vfs > 0) {
                pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
                pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
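
/* Usage sketch (value is an example): rx_frag_size is the only functional
 * module parameter left; num_vfs merely triggers the warning above:
 *
 *   modprobe be2net rx_frag_size=4096
 */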