drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
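
/* Both parameters are read-only at runtime (S_IRUGO), so they must be set
 * at module load time, e.g. (assuming the module name be2net):
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */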

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
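
/* The tables below give human-readable names for the bit positions of the
 * UE (unrecoverable error) status low/high CSRs, one string per bit;
 * they are presumably used when logging hardware error state from the
 * error-detection path.
 */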
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
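
/* Each BE queue is thus a single zeroed, physically contiguous DMA-coherent
 * ring of 'len' entries of 'entry_size' bytes: the hardware addresses it
 * through mem->dma while the driver uses mem->va.
 */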

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
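
/* Doorbell helpers: each notify routine builds a 32-bit doorbell word with
 * the ring id in the low bits and a posted/popped count shifted into the
 * upper bits, then writes it to the queue's doorbell register. The wmb()
 * before the iowrite32() in the RQ/TXQ paths ensures the descriptors are
 * globally visible before the hardware is told about them.
 */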
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
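
/* accumulate_16bit_val() folds a 16-bit hardware counter that wraps at
 * 65535 into a 32-bit software accumulator: the low 16 bits hold the last
 * hardware value and the high 16 bits count wraps. For example, if *acc is
 * 0x0001fff0 and the hardware now reports 0x0005, the counter wrapped, so
 * the new accumulated value becomes 0x00020005.
 */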
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
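
/* be_get_stats64() aggregates per-queue counters. Each queue's counters are
 * protected by a u64_stats sync point: the fetch_begin/fetch_retry loop
 * rereads the counters until it obtains a snapshot that was not concurrently
 * updated, which is what makes the 64-bit counters safe to read on 32-bit
 * hosts.
 */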
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
			   drvs->rx_alignment_symbol_errors +
			   drvs->rx_in_range_errors +
			   drvs->rx_out_range_errors +
			   drvs->rx_frame_too_long +
			   drvs->rx_dropped_too_small +
			   drvs->rx_dropped_too_short +
			   drvs->rx_dropped_header_too_small +
			   drvs->rx_dropped_tcp_length +
			   drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
				  drvs->rx_out_range_errors +
				  drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
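
/* Worked example of the count above: an skb with linear data and one page
 * frag needs 1 (linear) + 1 (frag) + 1 (hdr) = 3 WRBs; on BEx chips the odd
 * count gets a dummy WRB appended to make it even (4), while Lancer accepts
 * the odd count as-is.
 */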

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
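
/* be_insert_vlan_in_pkt() implements the software side of several VLAN
 * workarounds used below: it inlines the (possibly priority-remapped) inner
 * tag into the packet itself, and for QnQ configurations also inserts the
 * outer tag, setting *skip_hw_vlan so the hardware does not add a second
 * one.
 */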
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
		    txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else {
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent VLAN Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
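
/* Adaptive interrupt coalescing: be_eqd_update() samples the per-queue
 * RX/TX packet counters, converts the delta into packets per second, and
 * maps that to an event-queue delay of roughly (pps / 15000) * 4, clamped
 * to the [min_eqd, max_eqd] window configured for the queue. The new delay
 * is pushed to firmware as a multiplier of (eqd * 65) / 100, and only when
 * it differs from the previously programmed value.
 */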
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
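
/* RX buffer recycling: each RX frag lives in a page that may hold several
 * frags. Intermediate frags are only dma_sync'ed for CPU access so the page
 * mapping can keep being reused, while the frag marked last_frag unmaps the
 * whole big_page_size mapping. get_rx_page_info() pops the page_info at the
 * ring tail accordingly.
 */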
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
1590
1591/*
1592 * skb_fill_rx_data forms a complete skb for an ether frame
1593 * indicated by rxcp.
1594 */
10ef9ab4
SP
1595static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1596 struct be_rx_compl_info *rxcp)
6b7c5b94 1597{
6b7c5b94 1598 struct be_rx_page_info *page_info;
2e588f84
SP
1599 u16 i, j;
1600 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1601 u8 *start;
6b7c5b94 1602
0b0ef1d0 1603 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1604 start = page_address(page_info->page) + page_info->page_offset;
1605 prefetch(start);
1606
1607 /* Copy data in the first descriptor of this completion */
2e588f84 1608 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1609
6b7c5b94
SP
1610 skb->len = curr_frag_len;
1611 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1612 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1613 /* Complete packet has now been moved to data */
1614 put_page(page_info->page);
1615 skb->data_len = 0;
1616 skb->tail += curr_frag_len;
1617 } else {
ac1ae5f3
ED
1618 hdr_len = ETH_HLEN;
1619 memcpy(skb->data, start, hdr_len);
6b7c5b94 1620 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1621 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1622 skb_shinfo(skb)->frags[0].page_offset =
1623 page_info->page_offset + hdr_len;
748b539a
SP
1624 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1625 curr_frag_len - hdr_len);
6b7c5b94 1626 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1627 skb->truesize += rx_frag_size;
6b7c5b94
SP
1628 skb->tail += hdr_len;
1629 }
205859a2 1630 page_info->page = NULL;
6b7c5b94 1631
2e588f84
SP
1632 if (rxcp->pkt_size <= rx_frag_size) {
1633 BUG_ON(rxcp->num_rcvd != 1);
1634 return;
6b7c5b94
SP
1635 }
1636
1637 /* More frags present for this completion */
2e588f84
SP
1638 remaining = rxcp->pkt_size - curr_frag_len;
1639 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1640 page_info = get_rx_page_info(rxo);
2e588f84 1641 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1642
bd46cb6c
AK
1643 /* Coalesce all frags from the same physical page in one slot */
1644 if (page_info->page_offset == 0) {
1645 /* Fresh page */
1646 j++;
b061b39e 1647 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1648 skb_shinfo(skb)->frags[j].page_offset =
1649 page_info->page_offset;
9e903e08 1650 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1651 skb_shinfo(skb)->nr_frags++;
1652 } else {
1653 put_page(page_info->page);
1654 }
1655
9e903e08 1656 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1657 skb->len += curr_frag_len;
1658 skb->data_len += curr_frag_len;
bdb28a97 1659 skb->truesize += rx_frag_size;
2e588f84 1660 remaining -= curr_frag_len;
205859a2 1661 page_info->page = NULL;
6b7c5b94 1662 }
bd46cb6c 1663 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1664}
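/* Worked example for the coalescing loop above (annotation): with
 * rx_frag_size = 2048 and a page split into two posted fragments, the
 * second fragment arrives with page_offset == 2048, so it is merged
 * into frags[j] via skb_frag_size_add() and its extra page reference
 * dropped with put_page(); only a fragment with page_offset == 0 opens
 * a new frags[] slot.
 */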
1665
5be93b9a 1666/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1667static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1668 struct be_rx_compl_info *rxcp)
6b7c5b94 1669{
10ef9ab4 1670 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1671 struct net_device *netdev = adapter->netdev;
6b7c5b94 1672 struct sk_buff *skb;
89420424 1673
bb349bb4 1674 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1675 if (unlikely(!skb)) {
ac124ff9 1676 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1677 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1678 return;
1679 }
1680
10ef9ab4 1681 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1682
6332c8d3 1683 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1684 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1685 else
1686 skb_checksum_none_assert(skb);
6b7c5b94 1687
6332c8d3 1688 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1689 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1690 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1691 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1692
b6c0e89d 1693 skb->csum_level = rxcp->tunneled;
6384a4d0 1694 skb_mark_napi_id(skb, napi);
6b7c5b94 1695
343e43c0 1696 if (rxcp->vlanf)
86a9bad3 1697 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1698
1699 netif_receive_skb(skb);
6b7c5b94
SP
1700}
1701
5be93b9a 1702/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1703static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1704 struct napi_struct *napi,
1705 struct be_rx_compl_info *rxcp)
6b7c5b94 1706{
10ef9ab4 1707 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1708 struct be_rx_page_info *page_info;
5be93b9a 1709 struct sk_buff *skb = NULL;
2e588f84
SP
1710 u16 remaining, curr_frag_len;
1711 u16 i, j;
3968fa1e 1712
10ef9ab4 1713 skb = napi_get_frags(napi);
5be93b9a 1714 if (!skb) {
10ef9ab4 1715 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1716 return;
1717 }
1718
2e588f84
SP
1719 remaining = rxcp->pkt_size;
1720 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1721 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1722
1723 curr_frag_len = min(remaining, rx_frag_size);
1724
bd46cb6c
AK
1725 /* Coalesce all frags from the same physical page in one slot */
1726 if (i == 0 || page_info->page_offset == 0) {
1727 /* First frag or Fresh page */
1728 j++;
b061b39e 1729 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1730 skb_shinfo(skb)->frags[j].page_offset =
1731 page_info->page_offset;
9e903e08 1732 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1733 } else {
1734 put_page(page_info->page);
1735 }
9e903e08 1736 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1737 skb->truesize += rx_frag_size;
bd46cb6c 1738 remaining -= curr_frag_len;
6b7c5b94
SP
1739 memset(page_info, 0, sizeof(*page_info));
1740 }
bd46cb6c 1741 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1742
5be93b9a 1743 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1744 skb->len = rxcp->pkt_size;
1745 skb->data_len = rxcp->pkt_size;
5be93b9a 1746 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1747 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1748 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1749 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1750
b6c0e89d 1751 skb->csum_level = rxcp->tunneled;
6384a4d0 1752 skb_mark_napi_id(skb, napi);
5be93b9a 1753
343e43c0 1754 if (rxcp->vlanf)
86a9bad3 1755 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1756
10ef9ab4 1757 napi_gro_frags(napi);
2e588f84
SP
1758}
1759
10ef9ab4
SP
1760static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1761 struct be_rx_compl_info *rxcp)
2e588f84 1762{
c3c18bc1
SP
1763 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1764 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1765 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1766 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1767 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1768 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1769 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1770 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1771 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1772 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1773 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1774 if (rxcp->vlanf) {
c3c18bc1
SP
1775 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1776 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1777 }
c3c18bc1 1778 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1779 rxcp->tunneled =
c3c18bc1 1780 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1781}
1782
10ef9ab4
SP
1783static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1784 struct be_rx_compl_info *rxcp)
2e588f84 1785{
c3c18bc1
SP
1786 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1787 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1788 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1789 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1790 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1791 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1792 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1793 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1794 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1795 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1796 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1797 if (rxcp->vlanf) {
c3c18bc1
SP
1798 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1799 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1800 }
c3c18bc1
SP
1801 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1802 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1803}
1804
1805static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1806{
1807 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1808 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1809 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1810
2e588f84
SP
 1811 /* For checking the valid bit it is OK to use either definition, as the
1812 * valid bit is at the same position in both v0 and v1 Rx compl */
1813 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1814 return NULL;
6b7c5b94 1815
2e588f84
SP
1816 rmb();
1817 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1818
2e588f84 1819 if (adapter->be3_native)
10ef9ab4 1820 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1821 else
10ef9ab4 1822 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1823
e38b1706
SK
1824 if (rxcp->ip_frag)
1825 rxcp->l4_csum = 0;
1826
15d72184 1827 if (rxcp->vlanf) {
f93f160b
VV
1828 /* In QNQ modes, if qnq bit is not set, then the packet was
1829 * tagged only with the transparent outer vlan-tag and must
1830 * not be treated as a vlan packet by host
1831 */
1832 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1833 rxcp->vlanf = 0;
6b7c5b94 1834
15d72184 1835 if (!lancer_chip(adapter))
3c709f8f 1836 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1837
939cf306 1838 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1839 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1840 rxcp->vlanf = 0;
1841 }
2e588f84
SP
1842
 1843 /* As the compl has been parsed, reset it; we won't touch it again */
1844 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1845
3abcdeda 1846 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1847 return rxcp;
1848}
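/* Ordering note (annotation, assuming the adapter writes the valid bit
 * last): the rmb() keeps the CPU from reading the rest of the entry
 * before the valid dword is seen non-zero, and zeroing that dword after
 * parsing is what makes the next call report "no new completion" until
 * HW rewrites the slot.
 */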
1849
1829b086 1850static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1851{
6b7c5b94 1852 u32 order = get_order(size);
1829b086 1853
6b7c5b94 1854 if (order > 0)
1829b086
ED
1855 gfp |= __GFP_COMP;
1856 return alloc_pages(gfp, order);
6b7c5b94
SP
1857}
1858
1859/*
1860 * Allocate a page, split it to fragments of size rx_frag_size and post as
1861 * receive buffers to BE
1862 */
c30d7266 1863static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1864{
3abcdeda 1865 struct be_adapter *adapter = rxo->adapter;
26d92f92 1866 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1867 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1868 struct page *pagep = NULL;
ba42fad0 1869 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1870 struct be_eth_rx_d *rxd;
1871 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1872 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1873
3abcdeda 1874 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1875 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1876 if (!pagep) {
1829b086 1877 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1878 if (unlikely(!pagep)) {
ac124ff9 1879 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1880 break;
1881 }
ba42fad0
IV
1882 page_dmaaddr = dma_map_page(dev, pagep, 0,
1883 adapter->big_page_size,
2b7bcebf 1884 DMA_FROM_DEVICE);
ba42fad0
IV
1885 if (dma_mapping_error(dev, page_dmaaddr)) {
1886 put_page(pagep);
1887 pagep = NULL;
d3de1540 1888 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1889 break;
1890 }
e50287be 1891 page_offset = 0;
6b7c5b94
SP
1892 } else {
1893 get_page(pagep);
e50287be 1894 page_offset += rx_frag_size;
6b7c5b94 1895 }
e50287be 1896 page_info->page_offset = page_offset;
6b7c5b94 1897 page_info->page = pagep;
6b7c5b94
SP
1898
1899 rxd = queue_head_node(rxq);
e50287be 1900 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1901 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1902 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1903
1904 /* Any space left in the current big page for another frag? */
1905 if ((page_offset + rx_frag_size + rx_frag_size) >
1906 adapter->big_page_size) {
1907 pagep = NULL;
e50287be
SP
1908 page_info->last_frag = true;
1909 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1910 } else {
1911 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1912 }
26d92f92
SP
1913
1914 prev_page_info = page_info;
1915 queue_head_inc(rxq);
10ef9ab4 1916 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1917 }
e50287be
SP
1918
1919 /* Mark the last frag of a page when we break out of the above loop
1920 * with no more slots available in the RXQ
1921 */
1922 if (pagep) {
1923 prev_page_info->last_frag = true;
1924 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1925 }
6b7c5b94
SP
1926
1927 if (posted) {
6b7c5b94 1928 atomic_add(posted, &rxq->used);
6384a4d0
SP
1929 if (rxo->rx_post_starved)
1930 rxo->rx_post_starved = false;
c30d7266
AK
1931 do {
1932 notify = min(256u, posted);
1933 be_rxq_notify(adapter, rxq->id, notify);
1934 posted -= notify;
1935 } while (posted);
ea1dae11
SP
1936 } else if (atomic_read(&rxq->used) == 0) {
1937 /* Let be_worker replenish when memory is available */
3abcdeda 1938 rxo->rx_post_starved = true;
6b7c5b94 1939 }
6b7c5b94
SP
1940}
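/* Illustrative arithmetic (annotation, assuming 4K pages): with
 * rx_frag_size = 2048, big_page_size works out to 4096, so each page is
 * carved into two RX fragments; the "page_offset + 2 * rx_frag_size >
 * big_page_size" test decides when the current page is exhausted.
 * Doorbell notifications are batched at most 256 descriptors at a time.
 */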
1941
5fb379ee 1942static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1943{
6b7c5b94
SP
1944 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1945
1946 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1947 return NULL;
1948
f3eb62d2 1949 rmb();
6b7c5b94
SP
1950 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1951
1952 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1953
1954 queue_tail_inc(tx_cq);
1955 return txcp;
1956}
1957
3c8def97 1958static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1959 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1960{
3c8def97 1961 struct be_queue_info *txq = &txo->q;
a73b796e 1962 struct be_eth_wrb *wrb;
3c8def97 1963 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1964 struct sk_buff *sent_skb;
ec43b1a6
SP
1965 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1966 bool unmap_skb_hdr = true;
6b7c5b94 1967
ec43b1a6 1968 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1969 BUG_ON(!sent_skb);
ec43b1a6
SP
1970 sent_skbs[txq->tail] = NULL;
1971
1972 /* skip header wrb */
a73b796e 1973 queue_tail_inc(txq);
6b7c5b94 1974
ec43b1a6 1975 do {
6b7c5b94 1976 cur_index = txq->tail;
a73b796e 1977 wrb = queue_tail_node(txq);
2b7bcebf
IV
1978 unmap_tx_frag(&adapter->pdev->dev, wrb,
1979 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1980 unmap_skb_hdr = false;
1981
6b7c5b94
SP
1982 num_wrbs++;
1983 queue_tail_inc(txq);
ec43b1a6 1984 } while (cur_index != last_index);
6b7c5b94 1985
96d49225 1986 dev_consume_skb_any(sent_skb);
4d586b82 1987 return num_wrbs;
6b7c5b94
SP
1988}
1989
10ef9ab4
SP
1990/* Return the number of events in the event queue */
1991static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1992{
10ef9ab4
SP
1993 struct be_eq_entry *eqe;
1994 int num = 0;
859b1e4e 1995
10ef9ab4
SP
1996 do {
1997 eqe = queue_tail_node(&eqo->q);
1998 if (eqe->evt == 0)
1999 break;
859b1e4e 2000
10ef9ab4
SP
2001 rmb();
2002 eqe->evt = 0;
2003 num++;
2004 queue_tail_inc(&eqo->q);
2005 } while (true);
2006
2007 return num;
859b1e4e
SP
2008}
2009
10ef9ab4
SP
 2010 /* Leaves the EQ in a disarmed state */
2011static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2012{
10ef9ab4 2013 int num = events_get(eqo);
859b1e4e 2014
10ef9ab4 2015 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2016}
2017
10ef9ab4 2018static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2019{
2020 struct be_rx_page_info *page_info;
3abcdeda
SP
2021 struct be_queue_info *rxq = &rxo->q;
2022 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2023 struct be_rx_compl_info *rxcp;
d23e946c
SP
2024 struct be_adapter *adapter = rxo->adapter;
2025 int flush_wait = 0;
6b7c5b94 2026
d23e946c
SP
2027 /* Consume pending rx completions.
2028 * Wait for the flush completion (identified by zero num_rcvd)
2029 * to arrive. Notify CQ even when there are no more CQ entries
2030 * for HW to flush partially coalesced CQ entries.
2031 * In Lancer, there is no need to wait for flush compl.
2032 */
2033 for (;;) {
2034 rxcp = be_rx_compl_get(rxo);
ddf1169f 2035 if (!rxcp) {
d23e946c
SP
2036 if (lancer_chip(adapter))
2037 break;
2038
2039 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2040 dev_warn(&adapter->pdev->dev,
2041 "did not receive flush compl\n");
2042 break;
2043 }
2044 be_cq_notify(adapter, rx_cq->id, true, 0);
2045 mdelay(1);
2046 } else {
2047 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2048 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2049 if (rxcp->num_rcvd == 0)
2050 break;
2051 }
6b7c5b94
SP
2052 }
2053
d23e946c
SP
2054 /* After cleanup, leave the CQ in unarmed state */
2055 be_cq_notify(adapter, rx_cq->id, false, 0);
2056
2057 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2058 while (atomic_read(&rxq->used) > 0) {
2059 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2060 put_page(page_info->page);
2061 memset(page_info, 0, sizeof(*page_info));
2062 }
2063 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2064 rxq->tail = 0;
2065 rxq->head = 0;
6b7c5b94
SP
2066}
2067
0ae57bb3 2068static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2069{
0ae57bb3
SP
2070 struct be_tx_obj *txo;
2071 struct be_queue_info *txq;
a8e9179a 2072 struct be_eth_tx_compl *txcp;
4d586b82 2073 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2074 struct sk_buff *sent_skb;
2075 bool dummy_wrb;
0ae57bb3 2076 int i, pending_txqs;
a8e9179a 2077
1a3d0717 2078 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2079 do {
0ae57bb3
SP
2080 pending_txqs = adapter->num_tx_qs;
2081
2082 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2083 cmpl = 0;
2084 num_wrbs = 0;
0ae57bb3
SP
2085 txq = &txo->q;
2086 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2087 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2088 num_wrbs += be_tx_compl_process(adapter, txo,
2089 end_idx);
2090 cmpl++;
2091 }
2092 if (cmpl) {
2093 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2094 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2095 timeo = 0;
0ae57bb3
SP
2096 }
2097 if (atomic_read(&txq->used) == 0)
2098 pending_txqs--;
a8e9179a
SP
2099 }
2100
1a3d0717 2101 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2102 break;
2103
2104 mdelay(1);
2105 } while (true);
2106
0ae57bb3
SP
2107 for_all_tx_queues(adapter, txo, i) {
2108 txq = &txo->q;
2109 if (atomic_read(&txq->used))
2110 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2111 atomic_read(&txq->used));
2112
2113 /* free posted tx for which compls will never arrive */
2114 while (atomic_read(&txq->used)) {
2115 sent_skb = txo->sent_skb_list[txq->tail];
2116 end_idx = txq->tail;
2117 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2118 &dummy_wrb);
2119 index_adv(&end_idx, num_wrbs - 1, txq->len);
2120 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2121 atomic_sub(num_wrbs, &txq->used);
2122 }
b03388d6 2123 }
6b7c5b94
SP
2124}
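/* Timing note (annotation): timeo is reset whenever any TXQ produced a
 * completion in the last pass, so the loop exits only after ~10
 * back-to-back mdelay(1) passes with no progress -- i.e. HW silent for
 * about 10ms -- after which the remaining WRBs are reclaimed by hand
 * and their skbs freed without completions.
 */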
2125
10ef9ab4
SP
2126static void be_evt_queues_destroy(struct be_adapter *adapter)
2127{
2128 struct be_eq_obj *eqo;
2129 int i;
2130
2131 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2132 if (eqo->q.created) {
2133 be_eq_clean(eqo);
10ef9ab4 2134 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2135 napi_hash_del(&eqo->napi);
68d7bdcb 2136 netif_napi_del(&eqo->napi);
19d59aa7 2137 }
10ef9ab4
SP
2138 be_queue_free(adapter, &eqo->q);
2139 }
2140}
2141
2142static int be_evt_queues_create(struct be_adapter *adapter)
2143{
2144 struct be_queue_info *eq;
2145 struct be_eq_obj *eqo;
2632bafd 2146 struct be_aic_obj *aic;
10ef9ab4
SP
2147 int i, rc;
2148
92bf14ab
SP
2149 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2150 adapter->cfg_num_qs);
10ef9ab4
SP
2151
2152 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2153 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2154 BE_NAPI_WEIGHT);
6384a4d0 2155 napi_hash_add(&eqo->napi);
2632bafd 2156 aic = &adapter->aic_obj[i];
10ef9ab4 2157 eqo->adapter = adapter;
10ef9ab4 2158 eqo->idx = i;
2632bafd
SP
2159 aic->max_eqd = BE_MAX_EQD;
2160 aic->enable = true;
10ef9ab4
SP
2161
2162 eq = &eqo->q;
2163 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2164 sizeof(struct be_eq_entry));
10ef9ab4
SP
2165 if (rc)
2166 return rc;
2167
f2f781a7 2168 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2169 if (rc)
2170 return rc;
2171 }
1cfafab9 2172 return 0;
10ef9ab4
SP
2173}
2174
5fb379ee
SP
2175static void be_mcc_queues_destroy(struct be_adapter *adapter)
2176{
2177 struct be_queue_info *q;
5fb379ee 2178
8788fdc2 2179 q = &adapter->mcc_obj.q;
5fb379ee 2180 if (q->created)
8788fdc2 2181 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2182 be_queue_free(adapter, q);
2183
8788fdc2 2184 q = &adapter->mcc_obj.cq;
5fb379ee 2185 if (q->created)
8788fdc2 2186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2187 be_queue_free(adapter, q);
2188}
2189
2190/* Must be called only after TX qs are created as MCC shares TX EQ */
2191static int be_mcc_queues_create(struct be_adapter *adapter)
2192{
2193 struct be_queue_info *q, *cq;
5fb379ee 2194
8788fdc2 2195 cq = &adapter->mcc_obj.cq;
5fb379ee 2196 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2197 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2198 goto err;
2199
10ef9ab4
SP
2200 /* Use the default EQ for MCC completions */
2201 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2202 goto mcc_cq_free;
2203
8788fdc2 2204 q = &adapter->mcc_obj.q;
5fb379ee
SP
2205 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2206 goto mcc_cq_destroy;
2207
8788fdc2 2208 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2209 goto mcc_q_free;
2210
2211 return 0;
2212
2213mcc_q_free:
2214 be_queue_free(adapter, q);
2215mcc_cq_destroy:
8788fdc2 2216 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2217mcc_cq_free:
2218 be_queue_free(adapter, cq);
2219err:
2220 return -1;
2221}
2222
6b7c5b94
SP
2223static void be_tx_queues_destroy(struct be_adapter *adapter)
2224{
2225 struct be_queue_info *q;
3c8def97
SP
2226 struct be_tx_obj *txo;
2227 u8 i;
6b7c5b94 2228
3c8def97
SP
2229 for_all_tx_queues(adapter, txo, i) {
2230 q = &txo->q;
2231 if (q->created)
2232 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2233 be_queue_free(adapter, q);
6b7c5b94 2234
3c8def97
SP
2235 q = &txo->cq;
2236 if (q->created)
2237 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2238 be_queue_free(adapter, q);
2239 }
6b7c5b94
SP
2240}
2241
7707133c 2242static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2243{
10ef9ab4 2244 struct be_queue_info *cq, *eq;
3c8def97 2245 struct be_tx_obj *txo;
92bf14ab 2246 int status, i;
6b7c5b94 2247
92bf14ab 2248 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2249
10ef9ab4
SP
2250 for_all_tx_queues(adapter, txo, i) {
2251 cq = &txo->cq;
2252 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2253 sizeof(struct be_eth_tx_compl));
2254 if (status)
2255 return status;
3c8def97 2256
827da44c
JS
2257 u64_stats_init(&txo->stats.sync);
2258 u64_stats_init(&txo->stats.sync_compl);
2259
10ef9ab4
SP
 2260 /* If num_evt_qs is less than num_tx_qs, then more than
 2261 * one TXQ shares an EQ
2262 */
2263 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2264 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2265 if (status)
2266 return status;
6b7c5b94 2267
10ef9ab4
SP
2268 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2269 sizeof(struct be_eth_wrb));
2270 if (status)
2271 return status;
6b7c5b94 2272
94d73aaa 2273 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2274 if (status)
2275 return status;
3c8def97 2276 }
6b7c5b94 2277
d379142b
SP
2278 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2279 adapter->num_tx_qs);
10ef9ab4 2280 return 0;
6b7c5b94
SP
2281}
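/* Mapping example (annotation): with num_evt_qs = 4 and num_tx_qs = 8,
 * the "i % adapter->num_evt_qs" index above puts TXQ0 and TXQ4 on EQ0,
 * TXQ1 and TXQ5 on EQ1, and so on, letting two TX rings share one
 * interrupt vector.
 */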
2282
10ef9ab4 2283static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2284{
2285 struct be_queue_info *q;
3abcdeda
SP
2286 struct be_rx_obj *rxo;
2287 int i;
2288
2289 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2290 q = &rxo->cq;
2291 if (q->created)
2292 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2293 be_queue_free(adapter, q);
ac6a0c4a
SP
2294 }
2295}
2296
10ef9ab4 2297static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2298{
10ef9ab4 2299 struct be_queue_info *eq, *cq;
3abcdeda
SP
2300 struct be_rx_obj *rxo;
2301 int rc, i;
6b7c5b94 2302
92bf14ab
SP
2303 /* We can create as many RSS rings as there are EQs. */
2304 adapter->num_rx_qs = adapter->num_evt_qs;
2305
 2306 /* We'll use RSS only if at least 2 RSS rings are supported.
2307 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2308 */
92bf14ab
SP
2309 if (adapter->num_rx_qs > 1)
2310 adapter->num_rx_qs++;
2311
6b7c5b94 2312 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2313 for_all_rx_queues(adapter, rxo, i) {
2314 rxo->adapter = adapter;
3abcdeda
SP
2315 cq = &rxo->cq;
2316 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2317 sizeof(struct be_eth_rx_compl));
3abcdeda 2318 if (rc)
10ef9ab4 2319 return rc;
3abcdeda 2320
827da44c 2321 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2322 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2323 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2324 if (rc)
10ef9ab4 2325 return rc;
3abcdeda 2326 }
6b7c5b94 2327
d379142b
SP
2328 dev_info(&adapter->pdev->dev,
2329 "created %d RSS queue(s) and 1 default RX queue\n",
2330 adapter->num_rx_qs - 1);
10ef9ab4 2331 return 0;
b628bde2
SP
2332}
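/* Sizing example (annotation): with 4 EQs, 4 RSS-capable rings are
 * created and, since more than one exists, one extra default RXQ is
 * added for non-RSS traffic, giving num_rx_qs = 5; the dev_info() above
 * then reports "created 4 RSS queue(s) and 1 default RX queue".
 */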
2333
6b7c5b94
SP
2334static irqreturn_t be_intx(int irq, void *dev)
2335{
e49cc34f
SP
2336 struct be_eq_obj *eqo = dev;
2337 struct be_adapter *adapter = eqo->adapter;
2338 int num_evts = 0;
6b7c5b94 2339
d0b9cec3
SP
2340 /* IRQ is not expected when NAPI is scheduled as the EQ
2341 * will not be armed.
2342 * But, this can happen on Lancer INTx where it takes
 2343 * a while to de-assert INTx or in BE2 where occasionally
2344 * an interrupt may be raised even when EQ is unarmed.
2345 * If NAPI is already scheduled, then counting & notifying
2346 * events will orphan them.
e49cc34f 2347 */
d0b9cec3 2348 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2349 num_evts = events_get(eqo);
d0b9cec3
SP
2350 __napi_schedule(&eqo->napi);
2351 if (num_evts)
2352 eqo->spurious_intr = 0;
2353 }
2354 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2355
d0b9cec3
SP
 2356 /* Return IRQ_HANDLED only for the first spurious intr
2357 * after a valid intr to stop the kernel from branding
2358 * this irq as a bad one!
e49cc34f 2359 */
d0b9cec3
SP
2360 if (num_evts || eqo->spurious_intr++ == 0)
2361 return IRQ_HANDLED;
2362 else
2363 return IRQ_NONE;
6b7c5b94
SP
2364}
2365
10ef9ab4 2366static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2367{
10ef9ab4 2368 struct be_eq_obj *eqo = dev;
6b7c5b94 2369
0b545a62
SP
2370 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2371 napi_schedule(&eqo->napi);
6b7c5b94
SP
2372 return IRQ_HANDLED;
2373}
2374
2e588f84 2375static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2376{
e38b1706 2377 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
6b7c5b94
SP
2378}
2379
10ef9ab4 2380static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2381 int budget, int polling)
6b7c5b94 2382{
3abcdeda
SP
2383 struct be_adapter *adapter = rxo->adapter;
2384 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2385 struct be_rx_compl_info *rxcp;
6b7c5b94 2386 u32 work_done;
c30d7266 2387 u32 frags_consumed = 0;
6b7c5b94
SP
2388
2389 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2390 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2391 if (!rxcp)
2392 break;
2393
12004ae9
SP
2394 /* Is it a flush compl that has no data */
2395 if (unlikely(rxcp->num_rcvd == 0))
2396 goto loop_continue;
2397
2398 /* Discard compl with partial DMA Lancer B0 */
2399 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2400 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2401 goto loop_continue;
2402 }
2403
2404 /* On BE drop pkts that arrive due to imperfect filtering in
 2405 * promiscuous mode on some SKUs
2406 */
2407 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2408 !lancer_chip(adapter))) {
10ef9ab4 2409 be_rx_compl_discard(rxo, rxcp);
12004ae9 2410 goto loop_continue;
64642811 2411 }
009dd872 2412
6384a4d0
SP
2413 /* Don't do gro when we're busy_polling */
2414 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2415 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2416 else
6384a4d0
SP
2417 be_rx_compl_process(rxo, napi, rxcp);
2418
12004ae9 2419loop_continue:
c30d7266 2420 frags_consumed += rxcp->num_rcvd;
2e588f84 2421 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2422 }
2423
10ef9ab4
SP
2424 if (work_done) {
2425 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2426
6384a4d0
SP
2427 /* When an rx-obj gets into post_starved state, just
2428 * let be_worker do the posting.
2429 */
2430 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2431 !rxo->rx_post_starved)
c30d7266
AK
2432 be_post_rx_frags(rxo, GFP_ATOMIC,
2433 max_t(u32, MAX_RX_POST,
2434 frags_consumed));
6b7c5b94 2435 }
10ef9ab4 2436
6b7c5b94
SP
2437 return work_done;
2438}
2439
512bb8a2
KA
2440static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2441{
2442 switch (status) {
2443 case BE_TX_COMP_HDR_PARSE_ERR:
2444 tx_stats(txo)->tx_hdr_parse_err++;
2445 break;
2446 case BE_TX_COMP_NDMA_ERR:
2447 tx_stats(txo)->tx_dma_err++;
2448 break;
2449 case BE_TX_COMP_ACL_ERR:
2450 tx_stats(txo)->tx_spoof_check_err++;
2451 break;
2452 }
2453}
2454
2455static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2456{
2457 switch (status) {
2458 case LANCER_TX_COMP_LSO_ERR:
2459 tx_stats(txo)->tx_tso_err++;
2460 break;
2461 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2462 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 case LANCER_TX_COMP_QINQ_ERR:
2466 tx_stats(txo)->tx_qinq_err++;
2467 break;
2468 case LANCER_TX_COMP_PARITY_ERR:
2469 tx_stats(txo)->tx_internal_parity_err++;
2470 break;
2471 case LANCER_TX_COMP_DMA_ERR:
2472 tx_stats(txo)->tx_dma_err++;
2473 break;
2474 }
2475}
2476
c8f64615
SP
2477static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2478 int idx)
6b7c5b94 2479{
6b7c5b94 2480 struct be_eth_tx_compl *txcp;
c8f64615 2481 int num_wrbs = 0, work_done = 0;
512bb8a2 2482 u32 compl_status;
c8f64615
SP
2483 u16 last_idx;
2484
2485 while ((txcp = be_tx_compl_get(&txo->cq))) {
2486 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2487 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2488 work_done++;
3c8def97 2489
512bb8a2
KA
2490 compl_status = GET_TX_COMPL_BITS(status, txcp);
2491 if (compl_status) {
2492 if (lancer_chip(adapter))
2493 lancer_update_tx_err(txo, compl_status);
2494 else
2495 be_update_tx_err(txo, compl_status);
2496 }
10ef9ab4 2497 }
6b7c5b94 2498
10ef9ab4
SP
2499 if (work_done) {
2500 be_cq_notify(adapter, txo->cq.id, true, work_done);
2501 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2502
10ef9ab4
SP
2503 /* As Tx wrbs have been freed up, wake up netdev queue
2504 * if it was stopped due to lack of tx wrbs. */
2505 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2506 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2507 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2508 }
10ef9ab4
SP
2509
2510 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2511 tx_stats(txo)->tx_compl += work_done;
2512 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2513 }
10ef9ab4 2514}
6b7c5b94 2515
68d7bdcb 2516int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2517{
2518 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2519 struct be_adapter *adapter = eqo->adapter;
0b545a62 2520 int max_work = 0, work, i, num_evts;
6384a4d0 2521 struct be_rx_obj *rxo;
a4906ea0 2522 struct be_tx_obj *txo;
f31e50a8 2523
0b545a62
SP
2524 num_evts = events_get(eqo);
2525
a4906ea0
SP
2526 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2527 be_process_tx(adapter, txo, i);
f31e50a8 2528
6384a4d0
SP
2529 if (be_lock_napi(eqo)) {
2530 /* This loop will iterate twice for EQ0 in which
2531 * completions of the last RXQ (default one) are also processed
2532 * For other EQs the loop iterates only once
2533 */
2534 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2535 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2536 max_work = max(work, max_work);
2537 }
2538 be_unlock_napi(eqo);
2539 } else {
2540 max_work = budget;
10ef9ab4 2541 }
6b7c5b94 2542
10ef9ab4
SP
2543 if (is_mcc_eqo(eqo))
2544 be_process_mcc(adapter);
93c86700 2545
10ef9ab4
SP
2546 if (max_work < budget) {
2547 napi_complete(napi);
0b545a62 2548 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2549 } else {
2550 /* As we'll continue in polling mode, count and clear events */
0b545a62 2551 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2552 }
10ef9ab4 2553 return max_work;
6b7c5b94
SP
2554}
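/* NAPI-contract note (annotation): returning max_work == budget keeps
 * this EQ in polled mode with interrupts unarmed, while max_work <
 * budget completes NAPI and re-arms the EQ; either way the num_evts
 * events counted at entry are acked through be_eq_notify().
 */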
2555
6384a4d0
SP
2556#ifdef CONFIG_NET_RX_BUSY_POLL
2557static int be_busy_poll(struct napi_struct *napi)
2558{
2559 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2560 struct be_adapter *adapter = eqo->adapter;
2561 struct be_rx_obj *rxo;
2562 int i, work = 0;
2563
2564 if (!be_lock_busy_poll(eqo))
2565 return LL_FLUSH_BUSY;
2566
2567 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2568 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2569 if (work)
2570 break;
2571 }
2572
2573 be_unlock_busy_poll(eqo);
2574 return work;
2575}
2576#endif
2577
f67ef7ba 2578void be_detect_error(struct be_adapter *adapter)
7c185276 2579{
e1cfb67a
PR
2580 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2581 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2582 u32 i;
eb0eecc1
SK
2583 bool error_detected = false;
2584 struct device *dev = &adapter->pdev->dev;
2585 struct net_device *netdev = adapter->netdev;
7c185276 2586
d23e946c 2587 if (be_hw_error(adapter))
72f02485
SP
2588 return;
2589
e1cfb67a
PR
2590 if (lancer_chip(adapter)) {
2591 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2592 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2593 sliport_err1 = ioread32(adapter->db +
748b539a 2594 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2595 sliport_err2 = ioread32(adapter->db +
748b539a 2596 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2597 adapter->hw_error = true;
 2598 /* Do not log error messages if it's a FW reset */
2599 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2600 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2601 dev_info(dev, "Firmware update in progress\n");
2602 } else {
2603 error_detected = true;
2604 dev_err(dev, "Error detected in the card\n");
2605 dev_err(dev, "ERR: sliport status 0x%x\n",
2606 sliport_status);
2607 dev_err(dev, "ERR: sliport error1 0x%x\n",
2608 sliport_err1);
2609 dev_err(dev, "ERR: sliport error2 0x%x\n",
2610 sliport_err2);
2611 }
e1cfb67a
PR
2612 }
2613 } else {
2614 pci_read_config_dword(adapter->pdev,
748b539a 2615 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2616 pci_read_config_dword(adapter->pdev,
748b539a 2617 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2618 pci_read_config_dword(adapter->pdev,
748b539a 2619 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2620 pci_read_config_dword(adapter->pdev,
748b539a 2621 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2622
f67ef7ba
PR
2623 ue_lo = (ue_lo & ~ue_lo_mask);
2624 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2625
eb0eecc1
SK
2626 /* On certain platforms BE hardware can indicate spurious UEs.
2627 * Allow HW to stop working completely in case of a real UE.
 2628 * Hence hw_error is not set on UE detection.
2629 */
f67ef7ba 2630
eb0eecc1
SK
2631 if (ue_lo || ue_hi) {
2632 error_detected = true;
2633 dev_err(dev,
2634 "Unrecoverable Error detected in the adapter");
2635 dev_err(dev, "Please reboot server to recover");
2636 if (skyhawk_chip(adapter))
2637 adapter->hw_error = true;
2638 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2639 if (ue_lo & 1)
2640 dev_err(dev, "UE: %s bit set\n",
2641 ue_status_low_desc[i]);
2642 }
2643 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2644 if (ue_hi & 1)
2645 dev_err(dev, "UE: %s bit set\n",
2646 ue_status_hi_desc[i]);
2647 }
7c185276
AK
2648 }
2649 }
eb0eecc1
SK
2650 if (error_detected)
2651 netif_carrier_off(netdev);
7c185276
AK
2652}
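/* Decoding note (annotation): after the per-bit masks are applied,
 * every bit still set in ue_lo/ue_hi is walked LSB-first and its
 * position used to index the matching ue_status_*_desc[] string table
 * in the dev_err() reports above.
 */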
2653
8d56ff11
SP
2654static void be_msix_disable(struct be_adapter *adapter)
2655{
ac6a0c4a 2656 if (msix_enabled(adapter)) {
8d56ff11 2657 pci_disable_msix(adapter->pdev);
ac6a0c4a 2658 adapter->num_msix_vec = 0;
68d7bdcb 2659 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2660 }
2661}
2662
c2bba3df 2663static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2664{
7dc4c064 2665 int i, num_vec;
d379142b 2666 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2667
92bf14ab
SP
2668 /* If RoCE is supported, program the max number of NIC vectors that
2669 * may be configured via set-channels, along with vectors needed for
 2670 * RoCE. Else, just program the number we'll use initially.
2671 */
2672 if (be_roce_supported(adapter))
2673 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2674 2 * num_online_cpus());
2675 else
2676 num_vec = adapter->cfg_num_qs;
3abcdeda 2677
ac6a0c4a 2678 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2679 adapter->msix_entries[i].entry = i;
2680
7dc4c064
AG
2681 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2682 MIN_MSIX_VECTORS, num_vec);
2683 if (num_vec < 0)
2684 goto fail;
92bf14ab 2685
92bf14ab
SP
2686 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2687 adapter->num_msix_roce_vec = num_vec / 2;
2688 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2689 adapter->num_msix_roce_vec);
2690 }
2691
2692 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2693
2694 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2695 adapter->num_msix_vec);
c2bba3df 2696 return 0;
7dc4c064
AG
2697
2698fail:
2699 dev_warn(dev, "MSIx enable failed\n");
2700
2701 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2702 if (!be_physfn(adapter))
2703 return num_vec;
2704 return 0;
6b7c5b94
SP
2705}
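/* Vector-count example (annotation): on an 8-CPU host with RoCE
 * support, num_vec starts at min(2 * be_max_eqs(adapter), 16); when the
 * full range is granted, half the vectors are set aside for RoCE and
 * the NIC keeps the rest. pci_enable_msix_range() fails outright if
 * fewer than MIN_MSIX_VECTORS can be allocated, which is fatal only for
 * VFs since they have no INTx fallback.
 */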
2706
fe6d2a38 2707static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2708 struct be_eq_obj *eqo)
b628bde2 2709{
f2f781a7 2710 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2711}
6b7c5b94 2712
b628bde2
SP
2713static int be_msix_register(struct be_adapter *adapter)
2714{
10ef9ab4
SP
2715 struct net_device *netdev = adapter->netdev;
2716 struct be_eq_obj *eqo;
2717 int status, i, vec;
6b7c5b94 2718
10ef9ab4
SP
2719 for_all_evt_queues(adapter, eqo, i) {
2720 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2721 vec = be_msix_vec_get(adapter, eqo);
2722 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2723 if (status)
2724 goto err_msix;
2725 }
b628bde2 2726
6b7c5b94 2727 return 0;
3abcdeda 2728err_msix:
10ef9ab4
SP
2729 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2730 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2731 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2732 status);
ac6a0c4a 2733 be_msix_disable(adapter);
6b7c5b94
SP
2734 return status;
2735}
2736
2737static int be_irq_register(struct be_adapter *adapter)
2738{
2739 struct net_device *netdev = adapter->netdev;
2740 int status;
2741
ac6a0c4a 2742 if (msix_enabled(adapter)) {
6b7c5b94
SP
2743 status = be_msix_register(adapter);
2744 if (status == 0)
2745 goto done;
ba343c77
SB
2746 /* INTx is not supported for VF */
2747 if (!be_physfn(adapter))
2748 return status;
6b7c5b94
SP
2749 }
2750
e49cc34f 2751 /* INTx: only the first EQ is used */
6b7c5b94
SP
2752 netdev->irq = adapter->pdev->irq;
2753 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2754 &adapter->eq_obj[0]);
6b7c5b94
SP
2755 if (status) {
2756 dev_err(&adapter->pdev->dev,
2757 "INTx request IRQ failed - err %d\n", status);
2758 return status;
2759 }
2760done:
2761 adapter->isr_registered = true;
2762 return 0;
2763}
2764
2765static void be_irq_unregister(struct be_adapter *adapter)
2766{
2767 struct net_device *netdev = adapter->netdev;
10ef9ab4 2768 struct be_eq_obj *eqo;
3abcdeda 2769 int i;
6b7c5b94
SP
2770
2771 if (!adapter->isr_registered)
2772 return;
2773
2774 /* INTx */
ac6a0c4a 2775 if (!msix_enabled(adapter)) {
e49cc34f 2776 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2777 goto done;
2778 }
2779
2780 /* MSIx */
10ef9ab4
SP
2781 for_all_evt_queues(adapter, eqo, i)
2782 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2783
6b7c5b94
SP
2784done:
2785 adapter->isr_registered = false;
6b7c5b94
SP
2786}
2787
10ef9ab4 2788static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2789{
2790 struct be_queue_info *q;
2791 struct be_rx_obj *rxo;
2792 int i;
2793
2794 for_all_rx_queues(adapter, rxo, i) {
2795 q = &rxo->q;
2796 if (q->created) {
2797 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2798 be_rx_cq_clean(rxo);
482c9e79 2799 }
10ef9ab4 2800 be_queue_free(adapter, q);
482c9e79
SP
2801 }
2802}
2803
889cd4b2
SP
2804static int be_close(struct net_device *netdev)
2805{
2806 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2807 struct be_eq_obj *eqo;
2808 int i;
889cd4b2 2809
e1ad8e33
KA
2810 /* This protection is needed as be_close() may be called even when the
2811 * adapter is in cleared state (after eeh perm failure)
2812 */
2813 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2814 return 0;
2815
045508a8
PP
2816 be_roce_dev_close(adapter);
2817
dff345c5
IV
2818 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2819 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2820 napi_disable(&eqo->napi);
6384a4d0
SP
2821 be_disable_busy_poll(eqo);
2822 }
71237b6f 2823 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2824 }
a323d9bf
SP
2825
2826 be_async_mcc_disable(adapter);
2827
2828 /* Wait for all pending tx completions to arrive so that
2829 * all tx skbs are freed.
2830 */
fba87559 2831 netif_tx_disable(netdev);
6e1f9975 2832 be_tx_compl_clean(adapter);
a323d9bf
SP
2833
2834 be_rx_qs_destroy(adapter);
2835
d11a347d
AK
2836 for (i = 1; i < (adapter->uc_macs + 1); i++)
2837 be_cmd_pmac_del(adapter, adapter->if_handle,
2838 adapter->pmac_id[i], 0);
2839 adapter->uc_macs = 0;
2840
a323d9bf 2841 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2842 if (msix_enabled(adapter))
2843 synchronize_irq(be_msix_vec_get(adapter, eqo));
2844 else
2845 synchronize_irq(netdev->irq);
2846 be_eq_clean(eqo);
63fcb27f
PR
2847 }
2848
889cd4b2
SP
2849 be_irq_unregister(adapter);
2850
482c9e79
SP
2851 return 0;
2852}
2853
10ef9ab4 2854static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2855{
2856 struct be_rx_obj *rxo;
e9008ee9 2857 int rc, i, j;
e2557877
VD
2858 u8 rss_hkey[RSS_HASH_KEY_LEN];
2859 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2860
2861 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2862 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2863 sizeof(struct be_eth_rx_d));
2864 if (rc)
2865 return rc;
2866 }
2867
2868 /* The FW would like the default RXQ to be created first */
2869 rxo = default_rxo(adapter);
2870 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2871 adapter->if_handle, false, &rxo->rss_id);
2872 if (rc)
2873 return rc;
2874
2875 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2876 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2877 rx_frag_size, adapter->if_handle,
2878 true, &rxo->rss_id);
482c9e79
SP
2879 if (rc)
2880 return rc;
2881 }
2882
2883 if (be_multi_rxq(adapter)) {
e2557877
VD
2884 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2885 j += adapter->num_rx_qs - 1) {
e9008ee9 2886 for_all_rss_queues(adapter, rxo, i) {
e2557877 2887 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2888 break;
e2557877
VD
2889 rss->rsstable[j + i] = rxo->rss_id;
2890 rss->rss_queue[j + i] = i;
e9008ee9
PR
2891 }
2892 }
e2557877
VD
2893 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2894 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2895
2896 if (!BEx_chip(adapter))
e2557877
VD
2897 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2898 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2899 } else {
2900 /* Disable RSS, if only default RX Q is created */
e2557877 2901 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2902 }
594ad54a 2903
e2557877 2904 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2905 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2906 128, rss_hkey);
da1388d6 2907 if (rc) {
e2557877 2908 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2909 return rc;
482c9e79
SP
2910 }
2911
e2557877
VD
2912 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2913
482c9e79 2914 /* First time posting */
10ef9ab4 2915 for_all_rx_queues(adapter, rxo, i)
c30d7266 2916 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
2917 return 0;
2918}
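/* Indirection-table example (annotation): with 3 RSS rings, the nested
 * loops above fill the table round-robin -- rsstable[0..2] get rings
 * 0..2, rsstable[3..5] get rings 0..2 again -- until all
 * RSS_INDIR_TABLE_LEN slots are written; the hash key itself is just
 * RSS_HASH_KEY_LEN bytes from get_random_bytes(), regenerated each time
 * the RX queues are created.
 */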
2919
6b7c5b94
SP
2920static int be_open(struct net_device *netdev)
2921{
2922 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2923 struct be_eq_obj *eqo;
3abcdeda 2924 struct be_rx_obj *rxo;
10ef9ab4 2925 struct be_tx_obj *txo;
b236916a 2926 u8 link_status;
3abcdeda 2927 int status, i;
5fb379ee 2928
10ef9ab4 2929 status = be_rx_qs_create(adapter);
482c9e79
SP
2930 if (status)
2931 goto err;
2932
c2bba3df
SK
2933 status = be_irq_register(adapter);
2934 if (status)
2935 goto err;
5fb379ee 2936
10ef9ab4 2937 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2938 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2939
10ef9ab4
SP
2940 for_all_tx_queues(adapter, txo, i)
2941 be_cq_notify(adapter, txo->cq.id, true, 0);
2942
7a1e9b20
SP
2943 be_async_mcc_enable(adapter);
2944
10ef9ab4
SP
2945 for_all_evt_queues(adapter, eqo, i) {
2946 napi_enable(&eqo->napi);
6384a4d0 2947 be_enable_busy_poll(eqo);
4cad9f3b 2948 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2949 }
04d3d624 2950 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2951
323ff71e 2952 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2953 if (!status)
2954 be_link_status_update(adapter, link_status);
2955
fba87559 2956 netif_tx_start_all_queues(netdev);
045508a8 2957 be_roce_dev_open(adapter);
c9c47142 2958
c5abe7c0 2959#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2960 if (skyhawk_chip(adapter))
2961 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2962#endif
2963
889cd4b2
SP
2964 return 0;
2965err:
2966 be_close(adapter->netdev);
2967 return -EIO;
5fb379ee
SP
2968}
2969
71d8d1b5
AK
2970static int be_setup_wol(struct be_adapter *adapter, bool enable)
2971{
2972 struct be_dma_mem cmd;
2973 int status = 0;
2974 u8 mac[ETH_ALEN];
2975
2976 memset(mac, 0, ETH_ALEN);
2977
2978 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2979 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2980 GFP_KERNEL);
ddf1169f 2981 if (!cmd.va)
6b568689 2982 return -ENOMEM;
71d8d1b5
AK
2983
2984 if (enable) {
2985 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2986 PCICFG_PM_CONTROL_OFFSET,
2987 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2988 if (status) {
2989 dev_err(&adapter->pdev->dev,
2381a55c 2990 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2991 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2992 cmd.dma);
71d8d1b5
AK
2993 return status;
2994 }
2995 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2996 adapter->netdev->dev_addr,
2997 &cmd);
71d8d1b5
AK
2998 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2999 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3000 } else {
3001 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3002 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3003 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3004 }
3005
2b7bcebf 3006 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3007 return status;
3008}
3009
6d87f5c3
AK
3010/*
3011 * Generate a seed MAC address from the PF MAC Address using jhash.
 3012 * MAC addresses for VFs are assigned incrementally starting from the seed.
3013 * These addresses are programmed in the ASIC by the PF and the VF driver
3014 * queries for the MAC address during its probe.
3015 */
4c876616 3016static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3017{
f9449ab7 3018 u32 vf;
3abcdeda 3019 int status = 0;
6d87f5c3 3020 u8 mac[ETH_ALEN];
11ac75ed 3021 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3022
3023 be_vf_eth_addr_generate(adapter, mac);
3024
11ac75ed 3025 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3026 if (BEx_chip(adapter))
590c391d 3027 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3028 vf_cfg->if_handle,
3029 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3030 else
3031 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3032 vf + 1);
590c391d 3033
6d87f5c3
AK
3034 if (status)
3035 dev_err(&adapter->pdev->dev,
748b539a
SP
3036 "Mac address assignment failed for VF %d\n",
3037 vf);
6d87f5c3 3038 else
11ac75ed 3039 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3040
3041 mac[5] += 1;
3042 }
3043 return status;
3044}
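/* Assignment example (annotation): if the jhash-derived seed ends in
 * ...:10, VF0 gets ...:10, VF1 gets ...:11, and so on -- only mac[5] is
 * bumped per VF, and the increment does not carry into mac[4], so a
 * seed ending in 0xff would wrap within the last octet.
 */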
3045
4c876616
SP
3046static int be_vfs_mac_query(struct be_adapter *adapter)
3047{
3048 int status, vf;
3049 u8 mac[ETH_ALEN];
3050 struct be_vf_cfg *vf_cfg;
4c876616
SP
3051
3052 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3053 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3054 mac, vf_cfg->if_handle,
3055 false, vf+1);
4c876616
SP
3056 if (status)
3057 return status;
3058 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3059 }
3060 return 0;
3061}
3062
f9449ab7 3063static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3064{
11ac75ed 3065 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3066 u32 vf;
3067
257a3feb 3068 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3069 dev_warn(&adapter->pdev->dev,
3070 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3071 goto done;
3072 }
3073
b4c1df93
SP
3074 pci_disable_sriov(adapter->pdev);
3075
11ac75ed 3076 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3077 if (BEx_chip(adapter))
11ac75ed
SP
3078 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3079 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3080 else
3081 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3082 vf + 1);
f9449ab7 3083
11ac75ed
SP
3084 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3085 }
39f1d94d
SP
3086done:
3087 kfree(adapter->vf_cfg);
3088 adapter->num_vfs = 0;
f174c7ec 3089 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3090}
3091
7707133c
SP
3092static void be_clear_queues(struct be_adapter *adapter)
3093{
3094 be_mcc_queues_destroy(adapter);
3095 be_rx_cqs_destroy(adapter);
3096 be_tx_queues_destroy(adapter);
3097 be_evt_queues_destroy(adapter);
3098}
3099
68d7bdcb 3100static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3101{
191eb756
SP
3102 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3103 cancel_delayed_work_sync(&adapter->work);
3104 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3105 }
68d7bdcb
SP
3106}
3107
b05004ad 3108static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3109{
3110 int i;
3111
b05004ad
SK
3112 if (adapter->pmac_id) {
3113 for (i = 0; i < (adapter->uc_macs + 1); i++)
3114 be_cmd_pmac_del(adapter, adapter->if_handle,
3115 adapter->pmac_id[i], 0);
3116 adapter->uc_macs = 0;
3117
3118 kfree(adapter->pmac_id);
3119 adapter->pmac_id = NULL;
3120 }
3121}
3122
c5abe7c0 3123#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3124static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3125{
3126 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3127 be_cmd_manage_iface(adapter, adapter->if_handle,
3128 OP_CONVERT_TUNNEL_TO_NORMAL);
3129
3130 if (adapter->vxlan_port)
3131 be_cmd_set_vxlan_port(adapter, 0);
3132
3133 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3134 adapter->vxlan_port = 0;
3135}
c5abe7c0 3136#endif
c9c47142 3137
b05004ad
SK
3138static int be_clear(struct be_adapter *adapter)
3139{
68d7bdcb 3140 be_cancel_worker(adapter);
191eb756 3141
11ac75ed 3142 if (sriov_enabled(adapter))
f9449ab7
SP
3143 be_vf_clear(adapter);
3144
bec84e6b
VV
3145 /* Re-configure FW to distribute resources evenly across max-supported
3146 * number of VFs, only when VFs are not already enabled.
3147 */
3148 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3149 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3150 pci_sriov_get_totalvfs(adapter->pdev));
3151
c5abe7c0 3152#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3153 be_disable_vxlan_offloads(adapter);
c5abe7c0 3154#endif
2d17f403 3155 /* delete the primary mac along with the uc-mac list */
b05004ad 3156 be_mac_clear(adapter);
fbc13f01 3157
f9449ab7 3158 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3159
7707133c 3160 be_clear_queues(adapter);
a54769f5 3161
10ef9ab4 3162 be_msix_disable(adapter);
e1ad8e33 3163 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3164 return 0;
3165}
3166
4c876616 3167static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3168{
92bf14ab 3169 struct be_resources res = {0};
4c876616
SP
3170 struct be_vf_cfg *vf_cfg;
3171 u32 cap_flags, en_flags, vf;
922bbe88 3172 int status = 0;
abb93951 3173
4c876616
SP
3174 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3175 BE_IF_FLAGS_MULTICAST;
abb93951 3176
4c876616 3177 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3178 if (!BE3_chip(adapter)) {
3179 status = be_cmd_get_profile_config(adapter, &res,
3180 vf + 1);
3181 if (!status)
3182 cap_flags = res.if_cap_flags;
3183 }
4c876616
SP
3184
3185 /* If a FW profile exists, then cap_flags are updated */
3186 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3187 BE_IF_FLAGS_BROADCAST |
3188 BE_IF_FLAGS_MULTICAST);
3189 status =
3190 be_cmd_if_create(adapter, cap_flags, en_flags,
3191 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3192 if (status)
3193 goto err;
3194 }
3195err:
3196 return status;
abb93951
PR
3197}
3198
39f1d94d 3199static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3200{
11ac75ed 3201 struct be_vf_cfg *vf_cfg;
30128031
SP
3202 int vf;
3203
39f1d94d
SP
3204 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3205 GFP_KERNEL);
3206 if (!adapter->vf_cfg)
3207 return -ENOMEM;
3208
11ac75ed
SP
3209 for_all_vfs(adapter, vf_cfg, vf) {
3210 vf_cfg->if_handle = -1;
3211 vf_cfg->pmac_id = -1;
30128031 3212 }
39f1d94d 3213 return 0;
30128031
SP
3214}
3215
f9449ab7
SP
3216static int be_vf_setup(struct be_adapter *adapter)
3217{
c502224e 3218 struct device *dev = &adapter->pdev->dev;
11ac75ed 3219 struct be_vf_cfg *vf_cfg;
4c876616 3220 int status, old_vfs, vf;
04a06028 3221 u32 privileges;
39f1d94d 3222
257a3feb 3223 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3224
3225 status = be_vf_setup_init(adapter);
3226 if (status)
3227 goto err;
30128031 3228
4c876616
SP
3229 if (old_vfs) {
3230 for_all_vfs(adapter, vf_cfg, vf) {
3231 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3232 if (status)
3233 goto err;
3234 }
f9449ab7 3235
4c876616
SP
3236 status = be_vfs_mac_query(adapter);
3237 if (status)
3238 goto err;
3239 } else {
bec84e6b
VV
3240 status = be_vfs_if_create(adapter);
3241 if (status)
3242 goto err;
3243
39f1d94d
SP
3244 status = be_vf_eth_addr_config(adapter);
3245 if (status)
3246 goto err;
3247 }
f9449ab7 3248
11ac75ed 3249 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 3250 /* Allow VFs to program MAC/VLAN filters */
3251 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3252 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3253 status = be_cmd_set_fn_privileges(adapter,
3254 privileges |
3255 BE_PRIV_FILTMGMT,
3256 vf + 1);
3257 if (!status)
3258 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3259 vf);
3260 }
3261
0f77ba73
RN
3262 /* Allow full available bandwidth */
3263 if (!old_vfs)
3264 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3265
bdce2ad7 3266 if (!old_vfs) {
0599863d 3267 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3268 be_cmd_set_logical_link_config(adapter,
3269 IFLA_VF_LINK_STATE_AUTO,
3270 vf+1);
3271 }
f9449ab7 3272 }
b4c1df93
SP
3273
3274 if (!old_vfs) {
3275 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3276 if (status) {
3277 dev_err(dev, "SRIOV enable failed\n");
3278 adapter->num_vfs = 0;
3279 goto err;
3280 }
3281 }
f174c7ec
VV
3282
3283 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3284 return 0;
3285err:
4c876616
SP
3286 dev_err(dev, "VF setup failed\n");
3287 be_vf_clear(adapter);
f9449ab7
SP
3288 return status;
3289}
3290
/* Converting function_mode bits on BE3 to SH mc_type enums */

static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3, the FW does not report the supported resource limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

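/* Determine how many VFs to enable. The limit comes from the FW profile
 * (or, on older BE3 FW that does not report max_vfs, from the PCI SR-IOV
 * capability). If VFs are already enabled, the num_vfs module parameter
 * is ignored in favour of the existing count; a typical request would be,
 * for example, loading the module with num_vfs=4 (illustrative usage).
 */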
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled, stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in an SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF has access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in the Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

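/* Program the initial MAC address. If the netdev has no address yet, the
 * permanent MAC is read from FW; otherwise the existing dev_addr is
 * re-used (the HW may have been reset). On BE3-R VFs the PF is
 * responsible for the initial MAC, so pmac_add is skipped there.
 */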
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

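/* Tears down and re-creates all queues, e.g. after a ring-count change.
 * Note that the MSI-x table is re-programmed only when none of its
 * vectors are shared with RoCE; otherwise the existing vectors are kept
 * as-is.
 */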
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

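/* Compares the CRC of an already-flashed region (the last 4 bytes, as
 * reported by FW) against the CRC bytes at the end of the corresponding
 * image section in the UFI file. A match means the region is already up
 * to date and flashing it can be skipped.
 */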
static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

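/* Writes one image to flash in chunks of at most 32KB. Intermediate
 * chunks use a SAVE opcode; the final chunk uses a FLASH opcode so FW
 * commits the accumulated data (PHY FW has its own opcode pair). An
 * ILLEGAL_REQUEST status for a PHY-FW write ends the loop without being
 * treated as a fatal error.
 */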
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

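/* Newer UFI files carry an explicit optype in each flash section entry;
 * older files set it to 0xFFFF, in which case the optype is derived from
 * the image type below.
 */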
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

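/* Lancer FW download: the image is streamed to the "/prg" object in 32KB
 * chunks via WRITE_OBJECT commands; a final zero-length write commits it.
 * Depending on the returned change_status, the adapter is either reset in
 * place to activate the new FW or the user is told a reboot is required.
 */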
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}

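/* UFI files are matched to the ASIC generation before flashing: a BE3-R
 * card (asic_type_rev 0x10) requires a TYPE3R image, and plain BE3
 * images must not be flashed on BE3-R. An incompatible combination fails
 * with an error instead of risking a bad flash.
 */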
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

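/* ndo_bridge_setlink: switches the embedded port between VEB mode (frames
 * between functions are switched inside the adapter) and VEPA mode (frames
 * are hairpinned through the external switch). Only valid while SR-IOV is
 * enabled.
 */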
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		return;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
}

static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
{
	return vxlan_gso_check(skb);
}
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_gso_check		= be_gso_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled, don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

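/* PCI probe: enables the device, maps BARs and allocates control/stats
 * DMA memory, waits for FW readiness (issuing a function reset when no
 * VFs are already enabled), then runs be_setup() and registers the
 * netdev. Error paths unwind in reverse order of setup.
 */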
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

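/* PCI error-recovery (AER/EEH) callbacks. The PCI core reports a
 * channel failure via error_detected(), resets the slot and calls
 * slot_reset(), and finally invokes resume() once I/O may restart.
 */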
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while the flash dump is in progress can
	 * cause it not to recover; wait for it to finish. Wait only for
	 * the first function, as it is needed only once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

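/* slot_reset(): bring the device back just far enough to prove it is
 * alive (enable, restore config space, wait for FW readiness); full
 * re-setup is deferred to be_eeh_resume().
 */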
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

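/* resume(): rebuild the function from scratch after a successful slot
 * reset, then restart the recovery worker and reattach the netdev.
 */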
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset, interrupts will
	 * remain disabled for each function. So, explicitly enable
	 * interrupts.
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

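/* Module entry point: sanity-check the rx_frag_size parameter (only
 * 2048/4096/8192-byte RX fragments are supported) before registering
 * with the PCI core.
 */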
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);