/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
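
/* The BE rings (EQs, CQs, TX/RX/MCC queues) are backed by the two helpers
 * above: be_queue_alloc() carves one physically contiguous, zeroed
 * DMA-coherent buffer of len * entry_size bytes that the driver and the
 * adapter index as a circular queue (mem->va is the CPU address, mem->dma
 * the bus address handed to the HW), and be_queue_free() releases it.
 */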

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
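
/* be_intr_set() first asks the FW to toggle interrupts via
 * be_cmd_intr_set(); only if that command fails (e.g. on FW that does not
 * support it) does it fall back to flipping the host-interrupt enable bit
 * in PCI config space through be_reg_intr_set().
 */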

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
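
/* The notify routines above share one doorbell pattern: pack the ring id
 * and a produced/consumed count into a single 32-bit value and write it to
 * the doorbell area mapped at adapter->db. The wmb() in the RQ and TXQ
 * paths orders the descriptor writes in coherent memory ahead of the
 * iowrite32(), so the HW never observes a doorbell before the entries it
 * announces.
 */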

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
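
/* Note the ordering in be_mac_addr_set(): the new MAC is programmed before
 * the old pmac-id is deleted, so the interface keeps a valid unicast
 * filter throughout, and success is declared only after
 * be_cmd_get_active_mac() confirms the FW actually activated the requested
 * address (a VF's PMAC_ADD may nominally succeed or fail independently of
 * what the PF provisioned).
 */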

/* BE2 supports only v0 cmd; BE3 supports v1; all later chips use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd; BE3 supports v1; all later chips use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
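
/* Worked example for accumulate_16bit_val(): with *acc == 0x0001fff0 and a
 * HW counter that has wrapped to val == 5, 5 < lo(*acc) flags the wrap;
 * newacc starts at 0x00010000 + 5 and the += 65536 carry yields
 * 0x00020005, so the 32-bit accumulator keeps counting across 16-bit
 * rollovers.
 */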

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* The erx HW counter below can actually wrap around after
		 * 65535. The driver accumulates a 32-bit value.
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1; being a superset, the v2
		 * layout can be used for v0/v1 access as well
		 */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
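
/* The do/while loops above are the usual u64_stats_sync reader pattern: on
 * 32-bit hosts a 64-bit counter could be torn mid-update, so the reader
 * snapshots pkts/bytes and retries if the writer's sequence count changed
 * underneath it; on 64-bit hosts the begin/retry helpers are no-ops.
 */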

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
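
/* Example: an skb with a non-empty linear head and two page frags needs
 * 1 + 2 data WRBs plus the hdr WRB = 4 in total, which is even, so no
 * dummy is added. With a single frag the count would be 3, so on BE chips
 * (unlike Lancer) a dummy WRB is appended to keep the per-request WRB
 * count even.
 */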

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
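
/* On a DMA mapping failure, make_tx_wrbs() rewinds txq->head to map_head
 * and walks the WRBs it already filled, unmapping each one; map_single is
 * true only for the first data WRB (the linear head mapped with
 * dma_map_single()) and is cleared after the first unmap, the remaining
 * WRBs being page mappings.
 */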

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
		 * to skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer and SH-R ASICs have a bug wherein packets of 32 bytes or
	 * less may cause a transmit stall on that port. The work-around is
	 * to pad such packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies the tot_len field in the IP
	 * header incorrectly when a VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes an incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
					VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If the vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
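
/* The hdr WRB for this request was reserved at index 'start' before
 * make_tx_wrbs() ran, so the failure path simply rewinds txq->head to
 * 'start' and frees the skb; on success the skb is parked in
 * sent_skb_list[start] until the TX completion path unmaps and frees it.
 */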

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
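
/* be_set_rx_mode() applies filters from least to most selective: full
 * promiscuous wins outright, multicast-promisc kicks in when the mc list
 * exceeds what the HW supports, and only then are the per-address
 * unicast/multicast tables reprogrammed. The uc-MAC loop rebuilds the
 * PMAC list from slot 1 upward because slot 0 always holds the primary
 * MAC.
 */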

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is a new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent VLAN Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
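
/* Example of the adaptive coalescing math above: at a combined RX+TX rate
 * of 120,000 pkts/s over the sampling window, eqd = (120000 / 15000) << 2
 * = 32, which is then clamped to [min_eqd, max_eqd]; any value below 8 is
 * forced to 0 so light traffic keeps low interrupt latency. The
 * (eqd * 65) / 100 conversion produces the delay-multiplier encoding
 * consumed by be_cmd_modify_eqd().
 */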

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
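
/* RX buffers are rx_frag_size slices of pages that were mapped with a
 * single DMA mapping of adapter->big_page_size bytes; the mapping is torn
 * down only when the slice marked last_page_user is consumed, since
 * earlier slices of the same page may still be owned by the HW.
 */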

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
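
/* Since consecutive rx_frag_size slices usually come from the same
 * physical page, the loop above extends the current skb fragment with
 * skb_frag_size_add() and drops the extra page reference instead of
 * consuming a new frag slot; a new slot is opened only when a fragment
 * starts at page_offset 0, i.e. on a fresh page.
 */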

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1589
5be93b9a 1590/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1591static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1592 struct napi_struct *napi,
1593 struct be_rx_compl_info *rxcp)
6b7c5b94 1594{
10ef9ab4 1595 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1596 struct be_rx_page_info *page_info;
5be93b9a 1597 struct sk_buff *skb = NULL;
2e588f84
SP
1598 u16 remaining, curr_frag_len;
1599 u16 i, j;
3968fa1e 1600
10ef9ab4 1601 skb = napi_get_frags(napi);
5be93b9a 1602 if (!skb) {
10ef9ab4 1603 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1604 return;
1605 }
1606
2e588f84
SP
1607 remaining = rxcp->pkt_size;
1608 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1609 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1610
1611 curr_frag_len = min(remaining, rx_frag_size);
1612
bd46cb6c
AK
1613 /* Coalesce all frags from the same physical page in one slot */
1614 if (i == 0 || page_info->page_offset == 0) {
1615 /* First frag or Fresh page */
1616 j++;
b061b39e 1617 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1618 skb_shinfo(skb)->frags[j].page_offset =
1619 page_info->page_offset;
9e903e08 1620 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1621 } else {
1622 put_page(page_info->page);
1623 }
9e903e08 1624 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1625 skb->truesize += rx_frag_size;
bd46cb6c 1626 remaining -= curr_frag_len;
6b7c5b94
SP
1627 memset(page_info, 0, sizeof(*page_info));
1628 }
bd46cb6c 1629 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1630
5be93b9a 1631 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1632 skb->len = rxcp->pkt_size;
1633 skb->data_len = rxcp->pkt_size;
5be93b9a 1634 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1635 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1636 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1637 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
6384a4d0 1638 skb_mark_napi_id(skb, napi);
5be93b9a 1639
343e43c0 1640 if (rxcp->vlanf)
86a9bad3 1641 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1642
10ef9ab4 1643 napi_gro_frags(napi);
2e588f84
SP
1644}
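
The GRO path above coalesces consecutive fragments carved from the same
physical page into a single skb frag slot: the first fragment, or any fragment
whose page_offset is zero, opens a new slot j; any other fragment extends the
current slot. The bookkeeping in isolation, with made-up fragment inputs:

	#include <stdio.h>

	struct frag { unsigned int page_offset, len; };

	int main(void)
	{
		/* three fragments; the last two share one physical page */
		struct frag frags[] = { { 0, 2048 }, { 0, 2048 }, { 2048, 1024 } };
		unsigned int slot_len[8] = { 0 };
		int i, j = -1;

		for (i = 0; i < 3; i++) {
			if (i == 0 || frags[i].page_offset == 0)
				j++;                     /* first frag or fresh page */
			slot_len[j] += frags[i].len;     /* same page: extend slot j */
		}
		printf("%d slot(s); slot %d holds %u bytes\n", j + 1, j, slot_len[j]);
		return 0;
	}

Run on the example input this prints "2 slot(s); slot 1 holds 3072 bytes",
mirroring how two 2048/1024-byte pieces of one page land in a single frag slot.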
1645
10ef9ab4
SP
1646static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1647 struct be_rx_compl_info *rxcp)
2e588f84
SP
1648{
1649 rxcp->pkt_size =
1650 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1651 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1652 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1653 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1654 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1655 rxcp->ip_csum =
1656 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1657 rxcp->l4_csum =
1658 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1659 rxcp->ipv6 =
1660 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1661 rxcp->num_rcvd =
1662 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1663 rxcp->pkt_type =
1664 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1665 rxcp->rss_hash =
c297977e 1666 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1667 if (rxcp->vlanf) {
1668 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1669 compl);
1670 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1671 compl);
15d72184 1672 }
12004ae9 1673 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1674}
1675
10ef9ab4
SP
1676static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1677 struct be_rx_compl_info *rxcp)
2e588f84
SP
1678{
1679 rxcp->pkt_size =
1680 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1681 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1682 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1683 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1684 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1685 rxcp->ip_csum =
1686 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1687 rxcp->l4_csum =
1688 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1689 rxcp->ipv6 =
1690 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1691 rxcp->num_rcvd =
1692 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1693 rxcp->pkt_type =
1694 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1695 rxcp->rss_hash =
c297977e 1696 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1697 if (rxcp->vlanf) {
1698 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1699 compl);
1700 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1701 compl);
15d72184 1702 }
12004ae9 1703 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1704 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1705 ip_frag, compl);
2e588f84
SP
1706}
1707
1708static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1709{
1710 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1711 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1712 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1713
2e588f84
SP
1714 /* For checking the valid bit it is OK to use either definition, as the
1715 * valid bit is at the same position in both v0 and v1 Rx compls */
1716 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1717 return NULL;
6b7c5b94 1718
2e588f84
SP
1719 rmb();
1720 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1721
2e588f84 1722 if (adapter->be3_native)
10ef9ab4 1723 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1724 else
10ef9ab4 1725 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1726
e38b1706
SK
1727 if (rxcp->ip_frag)
1728 rxcp->l4_csum = 0;
1729
15d72184
SP
1730 if (rxcp->vlanf) {
1731 /* vlanf could be wrongly set on some cards;
1732 * ignore it if vtm is not set */
752961a1 1733 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1734 rxcp->vlanf = 0;
6b7c5b94 1735
15d72184 1736 if (!lancer_chip(adapter))
3c709f8f 1737 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1738
939cf306 1739 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1740 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1741 rxcp->vlanf = 0;
1742 }
2e588f84
SP
1743
1744 /* As the compl has been parsed, reset it; we won't touch it again */
1745 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1746
3abcdeda 1747 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1748 return rxcp;
1749}
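
be_rx_compl_get() shows the generic consume protocol for DMA'd completion
rings: test the valid bit the hardware writes last, issue a read barrier,
parse, then clear the bit and advance the tail. A hedged user-space analogue
of the same handshake; struct compl_entry and ring_consume are invented names,
not driver API:

	#include <stddef.h>

	struct compl_entry {
		volatile unsigned int valid;    /* written last by the producer */
		unsigned int payload;
	};

	/* Returns the next finished entry, or NULL if none is ready. */
	static struct compl_entry *ring_consume(struct compl_entry *ring,
						size_t len, size_t *tail)
	{
		struct compl_entry *c = &ring[*tail];

		if (!c->valid)
			return NULL;
		__atomic_thread_fence(__ATOMIC_ACQUIRE); /* plays the role of rmb() */
		c->valid = 0;            /* reset: we won't touch this entry again */
		*tail = (*tail + 1) % len;
		return c;
	}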
1750
1829b086 1751static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1752{
6b7c5b94 1753 u32 order = get_order(size);
1829b086 1754
6b7c5b94 1755 if (order > 0)
1829b086
ED
1756 gfp |= __GFP_COMP;
1757 return alloc_pages(gfp, order);
6b7c5b94
SP
1758}
1759
1760/*
1761 * Allocate a page, split it into fragments of size rx_frag_size and post
1762 * them as receive buffers to BE
1763 */
1829b086 1764static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1765{
3abcdeda 1766 struct be_adapter *adapter = rxo->adapter;
26d92f92 1767 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1768 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1769 struct page *pagep = NULL;
ba42fad0 1770 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1771 struct be_eth_rx_d *rxd;
1772 u64 page_dmaaddr = 0, frag_dmaaddr;
1773 u32 posted, page_offset = 0;
1774
3abcdeda 1775 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1776 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1777 if (!pagep) {
1829b086 1778 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1779 if (unlikely(!pagep)) {
ac124ff9 1780 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1781 break;
1782 }
ba42fad0
IV
1783 page_dmaaddr = dma_map_page(dev, pagep, 0,
1784 adapter->big_page_size,
2b7bcebf 1785 DMA_FROM_DEVICE);
ba42fad0
IV
1786 if (dma_mapping_error(dev, page_dmaaddr)) {
1787 put_page(pagep);
1788 pagep = NULL;
1789 rx_stats(rxo)->rx_post_fail++;
1790 break;
1791 }
6b7c5b94
SP
1792 page_info->page_offset = 0;
1793 } else {
1794 get_page(pagep);
1795 page_info->page_offset = page_offset + rx_frag_size;
1796 }
1797 page_offset = page_info->page_offset;
1798 page_info->page = pagep;
fac6da5b 1799 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1800 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1801
1802 rxd = queue_head_node(rxq);
1803 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1804 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1805
1806 /* Any space left in the current big page for another frag? */
1807 if ((page_offset + rx_frag_size + rx_frag_size) >
1808 adapter->big_page_size) {
1809 pagep = NULL;
1810 page_info->last_page_user = true;
1811 }
26d92f92
SP
1812
1813 prev_page_info = page_info;
1814 queue_head_inc(rxq);
10ef9ab4 1815 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1816 }
1817 if (pagep)
26d92f92 1818 prev_page_info->last_page_user = true;
6b7c5b94
SP
1819
1820 if (posted) {
6b7c5b94 1821 atomic_add(posted, &rxq->used);
6384a4d0
SP
1822 if (rxo->rx_post_starved)
1823 rxo->rx_post_starved = false;
8788fdc2 1824 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1825 } else if (atomic_read(&rxq->used) == 0) {
1826 /* Let be_worker replenish when memory is available */
3abcdeda 1827 rxo->rx_post_starved = true;
6b7c5b94 1828 }
6b7c5b94
SP
1829}
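
be_post_rx_frags() slices each compound page into rx_frag_size pieces, posting
one piece per RX descriptor and taking an extra page reference for every piece
after the first; the page is retired (its final fragment marked last_page_user)
once another whole fragment would no longer fit. The offset walk alone, with
example sizes standing in for big_page_size and rx_frag_size:

	#include <stdio.h>

	int main(void)
	{
		unsigned int big_page_size = 8192;   /* example compound page */
		unsigned int rx_frag_size = 2048;
		unsigned int offset;

		for (offset = 0; offset + rx_frag_size <= big_page_size;
		     offset += rx_frag_size) {
			int last = (offset + 2 * rx_frag_size) > big_page_size;

			printf("post frag at offset %u%s\n",
			       offset, last ? " (last user of page)" : "");
		}
		return 0;
	}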
1830
5fb379ee 1831static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1832{
6b7c5b94
SP
1833 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1834
1835 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1836 return NULL;
1837
f3eb62d2 1838 rmb();
6b7c5b94
SP
1839 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1840
1841 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1842
1843 queue_tail_inc(tx_cq);
1844 return txcp;
1845}
1846
3c8def97
SP
1847static u16 be_tx_compl_process(struct be_adapter *adapter,
1848 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1849{
3c8def97 1850 struct be_queue_info *txq = &txo->q;
a73b796e 1851 struct be_eth_wrb *wrb;
3c8def97 1852 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1853 struct sk_buff *sent_skb;
ec43b1a6
SP
1854 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1855 bool unmap_skb_hdr = true;
6b7c5b94 1856
ec43b1a6 1857 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1858 BUG_ON(!sent_skb);
ec43b1a6
SP
1859 sent_skbs[txq->tail] = NULL;
1860
1861 /* skip header wrb */
a73b796e 1862 queue_tail_inc(txq);
6b7c5b94 1863
ec43b1a6 1864 do {
6b7c5b94 1865 cur_index = txq->tail;
a73b796e 1866 wrb = queue_tail_node(txq);
2b7bcebf
IV
1867 unmap_tx_frag(&adapter->pdev->dev, wrb,
1868 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1869 unmap_skb_hdr = false;
1870
6b7c5b94
SP
1871 num_wrbs++;
1872 queue_tail_inc(txq);
ec43b1a6 1873 } while (cur_index != last_index);
6b7c5b94 1874
6b7c5b94 1875 kfree_skb(sent_skb);
4d586b82 1876 return num_wrbs;
6b7c5b94
SP
1877}
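
A TX completion carries only the ring index of the last WRB of the skb it
acknowledges, so be_tx_compl_process() walks from the queue tail through that
index inclusive (one header WRB plus the data WRBs). On a power-of-two ring the
count can also be computed directly; a sketch, valid only under the stated
power-of-two assumption:

	/* Number of WRBs consumed by one completion, tail..last_index inclusive.
	 * Assumes ring_len is a power of two so the mask arithmetic holds. */
	static unsigned int wrbs_for_compl(unsigned int tail,
					   unsigned int last_index,
					   unsigned int ring_len)
	{
		return ((last_index - tail) & (ring_len - 1)) + 1;
	}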
1878
10ef9ab4
SP
1879/* Return the number of events in the event queue */
1880static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1881{
10ef9ab4
SP
1882 struct be_eq_entry *eqe;
1883 int num = 0;
859b1e4e 1884
10ef9ab4
SP
1885 do {
1886 eqe = queue_tail_node(&eqo->q);
1887 if (eqe->evt == 0)
1888 break;
859b1e4e 1889
10ef9ab4
SP
1890 rmb();
1891 eqe->evt = 0;
1892 num++;
1893 queue_tail_inc(&eqo->q);
1894 } while (true);
1895
1896 return num;
859b1e4e
SP
1897}
1898
10ef9ab4
SP
1899 /* Leaves the EQ in disarmed state */
1900static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1901{
10ef9ab4 1902 int num = events_get(eqo);
859b1e4e 1903
10ef9ab4 1904 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1905}
1906
10ef9ab4 1907static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1908{
1909 struct be_rx_page_info *page_info;
3abcdeda
SP
1910 struct be_queue_info *rxq = &rxo->q;
1911 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1912 struct be_rx_compl_info *rxcp;
d23e946c
SP
1913 struct be_adapter *adapter = rxo->adapter;
1914 int flush_wait = 0;
6b7c5b94 1915
d23e946c
SP
1916 /* Consume pending rx completions.
1917 * Wait for the flush completion (identified by zero num_rcvd)
1918 * to arrive. Notify CQ even when there are no more CQ entries
1919 * for HW to flush partially coalesced CQ entries.
1920 * In Lancer, there is no need to wait for flush compl.
1921 */
1922 for (;;) {
1923 rxcp = be_rx_compl_get(rxo);
1924 if (rxcp == NULL) {
1925 if (lancer_chip(adapter))
1926 break;
1927
1928 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1929 dev_warn(&adapter->pdev->dev,
1930 "did not receive flush compl\n");
1931 break;
1932 }
1933 be_cq_notify(adapter, rx_cq->id, true, 0);
1934 mdelay(1);
1935 } else {
1936 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1937 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1938 if (rxcp->num_rcvd == 0)
1939 break;
1940 }
6b7c5b94
SP
1941 }
1942
d23e946c
SP
1943 /* After cleanup, leave the CQ in unarmed state */
1944 be_cq_notify(adapter, rx_cq->id, false, 0);
1945
1946 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
1947 while (atomic_read(&rxq->used) > 0) {
1948 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1949 put_page(page_info->page);
1950 memset(page_info, 0, sizeof(*page_info));
1951 }
1952 BUG_ON(atomic_read(&rxq->used));
482c9e79 1953 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1954}
1955
0ae57bb3 1956static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1957{
0ae57bb3
SP
1958 struct be_tx_obj *txo;
1959 struct be_queue_info *txq;
a8e9179a 1960 struct be_eth_tx_compl *txcp;
4d586b82 1961 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1962 struct sk_buff *sent_skb;
1963 bool dummy_wrb;
0ae57bb3 1964 int i, pending_txqs;
a8e9179a
SP
1965
1966 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1967 do {
0ae57bb3
SP
1968 pending_txqs = adapter->num_tx_qs;
1969
1970 for_all_tx_queues(adapter, txo, i) {
1971 txq = &txo->q;
1972 while ((txcp = be_tx_compl_get(&txo->cq))) {
1973 end_idx =
1974 AMAP_GET_BITS(struct amap_eth_tx_compl,
1975 wrb_index, txcp);
1976 num_wrbs += be_tx_compl_process(adapter, txo,
1977 end_idx);
1978 cmpl++;
1979 }
1980 if (cmpl) {
1981 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1982 atomic_sub(num_wrbs, &txq->used);
1983 cmpl = 0;
1984 num_wrbs = 0;
1985 }
1986 if (atomic_read(&txq->used) == 0)
1987 pending_txqs--;
a8e9179a
SP
1988 }
1989
0ae57bb3 1990 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1991 break;
1992
1993 mdelay(1);
1994 } while (true);
1995
0ae57bb3
SP
1996 for_all_tx_queues(adapter, txo, i) {
1997 txq = &txo->q;
1998 if (atomic_read(&txq->used))
1999 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2000 atomic_read(&txq->used));
2001
2002 /* free posted tx for which compls will never arrive */
2003 while (atomic_read(&txq->used)) {
2004 sent_skb = txo->sent_skb_list[txq->tail];
2005 end_idx = txq->tail;
2006 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2007 &dummy_wrb);
2008 index_adv(&end_idx, num_wrbs - 1, txq->len);
2009 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2010 atomic_sub(num_wrbs, &txq->used);
2011 }
b03388d6 2012 }
6b7c5b94
SP
2013}
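
be_tx_compl_clean() caps its wait at roughly 200 ms: it reaps completions
across all TX queues in 1 ms steps, gives up after 200 iterations, and then
force-frees whatever is still posted. The bounded-poll idiom reduced to
callbacks; all three function pointers are hypothetical stand-ins:

	/* Bounded wait-for-drain: returns non-zero if leftovers remain and
	 * the caller must force-free them. */
	static int wait_for_drain(int (*pending)(void),
				  void (*reap_once)(void),
				  void (*sleep_1ms)(void))
	{
		int timeo = 0;

		do {
			reap_once();
			if (!pending() || ++timeo > 200)
				break;
			sleep_1ms();
		} while (1);

		return pending();
	}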
2014
10ef9ab4
SP
2015static void be_evt_queues_destroy(struct be_adapter *adapter)
2016{
2017 struct be_eq_obj *eqo;
2018 int i;
2019
2020 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2021 if (eqo->q.created) {
2022 be_eq_clean(eqo);
10ef9ab4 2023 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2024 napi_hash_del(&eqo->napi);
68d7bdcb 2025 netif_napi_del(&eqo->napi);
19d59aa7 2026 }
10ef9ab4
SP
2027 be_queue_free(adapter, &eqo->q);
2028 }
2029}
2030
2031static int be_evt_queues_create(struct be_adapter *adapter)
2032{
2033 struct be_queue_info *eq;
2034 struct be_eq_obj *eqo;
2632bafd 2035 struct be_aic_obj *aic;
10ef9ab4
SP
2036 int i, rc;
2037
92bf14ab
SP
2038 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2039 adapter->cfg_num_qs);
10ef9ab4
SP
2040
2041 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2042 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2043 BE_NAPI_WEIGHT);
6384a4d0 2044 napi_hash_add(&eqo->napi);
2632bafd 2045 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2046 eqo->adapter = adapter;
2047 eqo->tx_budget = BE_TX_BUDGET;
2048 eqo->idx = i;
2632bafd
SP
2049 aic->max_eqd = BE_MAX_EQD;
2050 aic->enable = true;
10ef9ab4
SP
2051
2052 eq = &eqo->q;
2053 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2054 sizeof(struct be_eq_entry));
2055 if (rc)
2056 return rc;
2057
f2f781a7 2058 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2059 if (rc)
2060 return rc;
2061 }
1cfafab9 2062 return 0;
10ef9ab4
SP
2063}
2064
5fb379ee
SP
2065static void be_mcc_queues_destroy(struct be_adapter *adapter)
2066{
2067 struct be_queue_info *q;
5fb379ee 2068
8788fdc2 2069 q = &adapter->mcc_obj.q;
5fb379ee 2070 if (q->created)
8788fdc2 2071 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2072 be_queue_free(adapter, q);
2073
8788fdc2 2074 q = &adapter->mcc_obj.cq;
5fb379ee 2075 if (q->created)
8788fdc2 2076 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2077 be_queue_free(adapter, q);
2078}
2079
2080/* Must be called only after TX qs are created as MCC shares TX EQ */
2081static int be_mcc_queues_create(struct be_adapter *adapter)
2082{
2083 struct be_queue_info *q, *cq;
5fb379ee 2084
8788fdc2 2085 cq = &adapter->mcc_obj.cq;
5fb379ee 2086 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 2087 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2088 goto err;
2089
10ef9ab4
SP
2090 /* Use the default EQ for MCC completions */
2091 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2092 goto mcc_cq_free;
2093
8788fdc2 2094 q = &adapter->mcc_obj.q;
5fb379ee
SP
2095 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2096 goto mcc_cq_destroy;
2097
8788fdc2 2098 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2099 goto mcc_q_free;
2100
2101 return 0;
2102
2103mcc_q_free:
2104 be_queue_free(adapter, q);
2105mcc_cq_destroy:
8788fdc2 2106 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2107mcc_cq_free:
2108 be_queue_free(adapter, cq);
2109err:
2110 return -1;
2111}
2112
6b7c5b94
SP
2113static void be_tx_queues_destroy(struct be_adapter *adapter)
2114{
2115 struct be_queue_info *q;
3c8def97
SP
2116 struct be_tx_obj *txo;
2117 u8 i;
6b7c5b94 2118
3c8def97
SP
2119 for_all_tx_queues(adapter, txo, i) {
2120 q = &txo->q;
2121 if (q->created)
2122 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2123 be_queue_free(adapter, q);
6b7c5b94 2124
3c8def97
SP
2125 q = &txo->cq;
2126 if (q->created)
2127 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2128 be_queue_free(adapter, q);
2129 }
6b7c5b94
SP
2130}
2131
7707133c 2132static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2133{
10ef9ab4 2134 struct be_queue_info *cq, *eq;
3c8def97 2135 struct be_tx_obj *txo;
92bf14ab 2136 int status, i;
6b7c5b94 2137
92bf14ab 2138 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2139
10ef9ab4
SP
2140 for_all_tx_queues(adapter, txo, i) {
2141 cq = &txo->cq;
2142 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2143 sizeof(struct be_eth_tx_compl));
2144 if (status)
2145 return status;
3c8def97 2146
827da44c
JS
2147 u64_stats_init(&txo->stats.sync);
2148 u64_stats_init(&txo->stats.sync_compl);
2149
10ef9ab4
SP
2150 /* If num_evt_qs is less than num_tx_qs, then more than
2151 * one txq shares an eq
2152 */
2153 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2154 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2155 if (status)
2156 return status;
6b7c5b94 2157
10ef9ab4
SP
2158 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2159 sizeof(struct be_eth_wrb));
2160 if (status)
2161 return status;
6b7c5b94 2162
94d73aaa 2163 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2164 if (status)
2165 return status;
3c8def97 2166 }
6b7c5b94 2167
d379142b
SP
2168 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2169 adapter->num_tx_qs);
10ef9ab4 2170 return 0;
6b7c5b94
SP
2171}
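
When there are more TX queues than event queues, the cq-create loop above binds
TXQ i to EQ i % num_evt_qs, sharing EQs round-robin. With example counts:

	#include <stdio.h>

	int main(void)
	{
		int num_tx_qs = 8, num_evt_qs = 3;   /* example counts */
		int i;

		for (i = 0; i < num_tx_qs; i++)
			printf("txq%d -> eq%d\n", i, i % num_evt_qs);
		return 0;
	}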
2172
10ef9ab4 2173static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2174{
2175 struct be_queue_info *q;
3abcdeda
SP
2176 struct be_rx_obj *rxo;
2177 int i;
2178
2179 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2180 q = &rxo->cq;
2181 if (q->created)
2182 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2183 be_queue_free(adapter, q);
ac6a0c4a
SP
2184 }
2185}
2186
10ef9ab4 2187static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2188{
10ef9ab4 2189 struct be_queue_info *eq, *cq;
3abcdeda
SP
2190 struct be_rx_obj *rxo;
2191 int rc, i;
6b7c5b94 2192
92bf14ab
SP
2193 /* We can create as many RSS rings as there are EQs. */
2194 adapter->num_rx_qs = adapter->num_evt_qs;
2195
2196 /* We'll use RSS only if at least 2 RSS rings are supported.
2197 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2198 */
92bf14ab
SP
2199 if (adapter->num_rx_qs > 1)
2200 adapter->num_rx_qs++;
2201
6b7c5b94 2202 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2203 for_all_rx_queues(adapter, rxo, i) {
2204 rxo->adapter = adapter;
3abcdeda
SP
2205 cq = &rxo->cq;
2206 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2207 sizeof(struct be_eth_rx_compl));
2208 if (rc)
10ef9ab4 2209 return rc;
3abcdeda 2210
827da44c 2211 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2212 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2213 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2214 if (rc)
10ef9ab4 2215 return rc;
3abcdeda 2216 }
6b7c5b94 2217
d379142b
SP
2218 dev_info(&adapter->pdev->dev,
2219 "created %d RSS queue(s) and 1 default RX queue\n",
2220 adapter->num_rx_qs - 1);
10ef9ab4 2221 return 0;
b628bde2
SP
2222}
2223
6b7c5b94
SP
2224static irqreturn_t be_intx(int irq, void *dev)
2225{
e49cc34f
SP
2226 struct be_eq_obj *eqo = dev;
2227 struct be_adapter *adapter = eqo->adapter;
2228 int num_evts = 0;
6b7c5b94 2229
d0b9cec3
SP
2230 /* IRQ is not expected when NAPI is scheduled as the EQ
2231 * will not be armed.
2232 * But, this can happen on Lancer INTx where it takes
2233 * a while to de-assert INTx or in BE2 where occasionally
2234 * an interrupt may be raised even when EQ is unarmed.
2235 * If NAPI is already scheduled, then counting & notifying
2236 * events will orphan them.
e49cc34f 2237 */
d0b9cec3 2238 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2239 num_evts = events_get(eqo);
d0b9cec3
SP
2240 __napi_schedule(&eqo->napi);
2241 if (num_evts)
2242 eqo->spurious_intr = 0;
2243 }
2244 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2245
d0b9cec3
SP
2246 /* Return IRQ_HANDLED only for the first spurious intr
2247 * after a valid intr to stop the kernel from branding
2248 * this irq as a bad one!
e49cc34f 2249 */
d0b9cec3
SP
2250 if (num_evts || eqo->spurious_intr++ == 0)
2251 return IRQ_HANDLED;
2252 else
2253 return IRQ_NONE;
6b7c5b94
SP
2254}
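
The return logic above tolerates exactly one event-less interrupt after each
valid one, which keeps the kernel's spurious-IRQ detector from disabling a line
that merely de-asserts slowly. The verdict rule in isolation, with 1 standing
in for IRQ_HANDLED and 0 for IRQ_NONE:

	/* Returns 1 (handled) for a valid interrupt or for the first
	 * event-less one after it; 0 (unhandled) thereafter. */
	static int intx_verdict(int num_evts, unsigned int *spurious_intr)
	{
		if (num_evts) {
			*spurious_intr = 0;
			return 1;
		}
		return (*spurious_intr)++ == 0;
	}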
2255
10ef9ab4 2256static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2257{
10ef9ab4 2258 struct be_eq_obj *eqo = dev;
6b7c5b94 2259
0b545a62
SP
2260 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2261 napi_schedule(&eqo->napi);
6b7c5b94
SP
2262 return IRQ_HANDLED;
2263}
2264
2e588f84 2265static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2266{
e38b1706 2267 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2268}
2269
10ef9ab4 2270static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2271 int budget, int polling)
6b7c5b94 2272{
3abcdeda
SP
2273 struct be_adapter *adapter = rxo->adapter;
2274 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2275 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2276 u32 work_done;
2277
2278 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2279 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2280 if (!rxcp)
2281 break;
2282
12004ae9
SP
2283 /* Is it a flush compl that has no data */
2284 if (unlikely(rxcp->num_rcvd == 0))
2285 goto loop_continue;
2286
2287 /* Discard compls with partial DMA (Lancer B0) */
2288 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2289 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2290 goto loop_continue;
2291 }
2292
2293 /* On BE drop pkts that arrive due to imperfect filtering in
2294 * promiscuous mode on some SKUs
2295 */
2296 if (unlikely(rxcp->port != adapter->port_num &&
2297 !lancer_chip(adapter))) {
10ef9ab4 2298 be_rx_compl_discard(rxo, rxcp);
12004ae9 2299 goto loop_continue;
64642811 2300 }
009dd872 2301
6384a4d0
SP
2302 /* Don't do gro when we're busy_polling */
2303 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2304 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2305 else
6384a4d0
SP
2306 be_rx_compl_process(rxo, napi, rxcp);
2307
12004ae9 2308loop_continue:
2e588f84 2309 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2310 }
2311
10ef9ab4
SP
2312 if (work_done) {
2313 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2314
6384a4d0
SP
2315 /* When an rx-obj gets into post_starved state, just
2316 * let be_worker do the posting.
2317 */
2318 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2319 !rxo->rx_post_starved)
10ef9ab4 2320 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2321 }
10ef9ab4 2322
6b7c5b94
SP
2323 return work_done;
2324}
2325
10ef9ab4
SP
2326static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2327 int budget, int idx)
6b7c5b94 2328{
6b7c5b94 2329 struct be_eth_tx_compl *txcp;
10ef9ab4 2330 int num_wrbs = 0, work_done;
3c8def97 2331
10ef9ab4
SP
2332 for (work_done = 0; work_done < budget; work_done++) {
2333 txcp = be_tx_compl_get(&txo->cq);
2334 if (!txcp)
2335 break;
2336 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2337 AMAP_GET_BITS(struct amap_eth_tx_compl,
2338 wrb_index, txcp));
10ef9ab4 2339 }
6b7c5b94 2340
10ef9ab4
SP
2341 if (work_done) {
2342 be_cq_notify(adapter, txo->cq.id, true, work_done);
2343 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2344
10ef9ab4
SP
2345 /* As Tx wrbs have been freed up, wake up netdev queue
2346 * if it was stopped due to lack of tx wrbs. */
2347 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2348 atomic_read(&txo->q.used) < txo->q.len / 2) {
2349 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2350 }
10ef9ab4
SP
2351
2352 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2353 tx_stats(txo)->tx_compl += work_done;
2354 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2355 }
10ef9ab4
SP
2356 return (work_done < budget); /* Done */
2357}
6b7c5b94 2358
68d7bdcb 2359int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2360{
2361 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2362 struct be_adapter *adapter = eqo->adapter;
0b545a62 2363 int max_work = 0, work, i, num_evts;
6384a4d0 2364 struct be_rx_obj *rxo;
10ef9ab4 2365 bool tx_done;
f31e50a8 2366
0b545a62
SP
2367 num_evts = events_get(eqo);
2368
10ef9ab4
SP
2369 /* Process all TXQs serviced by this EQ */
2370 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2371 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2372 eqo->tx_budget, i);
2373 if (!tx_done)
2374 max_work = budget;
f31e50a8
SP
2375 }
2376
6384a4d0
SP
2377 if (be_lock_napi(eqo)) {
2378 /* This loop will iterate twice for EQ0 in which
2379 * completions of the last RXQ (default one) are also processed
2380 * For other EQs the loop iterates only once
2381 */
2382 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2383 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2384 max_work = max(work, max_work);
2385 }
2386 be_unlock_napi(eqo);
2387 } else {
2388 max_work = budget;
10ef9ab4 2389 }
6b7c5b94 2390
10ef9ab4
SP
2391 if (is_mcc_eqo(eqo))
2392 be_process_mcc(adapter);
93c86700 2393
10ef9ab4
SP
2394 if (max_work < budget) {
2395 napi_complete(napi);
0b545a62 2396 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2397 } else {
2398 /* As we'll continue in polling mode, count and clear events */
0b545a62 2399 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2400 }
10ef9ab4 2401 return max_work;
6b7c5b94
SP
2402}
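
be_poll() follows the standard NAPI contract: when the total work stays under
the budget it completes NAPI and re-arms the EQ; otherwise it leaves the EQ
unarmed (events counted and cleared) so polling continues. A skeleton of that
contract with hypothetical helpers; poll_once() and eq_notify() are
illustrative stand-ins, not kernel API:

	static int napi_poll_skeleton(int budget,
				      int (*poll_once)(int budget),
				      void (*eq_notify)(int rearm, int num_evts),
				      int num_evts)
	{
		int work = poll_once(budget);

		if (work < budget)
			eq_notify(1, num_evts);  /* done: re-arm for next IRQ */
		else
			eq_notify(0, num_evts);  /* budget spent: keep polling */
		return work;
	}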
2403
6384a4d0
SP
2404#ifdef CONFIG_NET_RX_BUSY_POLL
2405static int be_busy_poll(struct napi_struct *napi)
2406{
2407 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2408 struct be_adapter *adapter = eqo->adapter;
2409 struct be_rx_obj *rxo;
2410 int i, work = 0;
2411
2412 if (!be_lock_busy_poll(eqo))
2413 return LL_FLUSH_BUSY;
2414
2415 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2416 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2417 if (work)
2418 break;
2419 }
2420
2421 be_unlock_busy_poll(eqo);
2422 return work;
2423}
2424#endif
2425
f67ef7ba 2426void be_detect_error(struct be_adapter *adapter)
7c185276 2427{
e1cfb67a
PR
2428 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2429 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2430 u32 i;
2431
d23e946c 2432 if (be_hw_error(adapter))
72f02485
SP
2433 return;
2434
e1cfb67a
PR
2435 if (lancer_chip(adapter)) {
2436 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2437 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2438 sliport_err1 = ioread32(adapter->db +
2439 SLIPORT_ERROR1_OFFSET);
2440 sliport_err2 = ioread32(adapter->db +
2441 SLIPORT_ERROR2_OFFSET);
2442 }
2443 } else {
2444 pci_read_config_dword(adapter->pdev,
2445 PCICFG_UE_STATUS_LOW, &ue_lo);
2446 pci_read_config_dword(adapter->pdev,
2447 PCICFG_UE_STATUS_HIGH, &ue_hi);
2448 pci_read_config_dword(adapter->pdev,
2449 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2450 pci_read_config_dword(adapter->pdev,
2451 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2452
f67ef7ba
PR
2453 ue_lo = (ue_lo & ~ue_lo_mask);
2454 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2455 }
7c185276 2456
1451ae6e
AK
2457 /* On certain platforms BE hardware can indicate spurious UEs.
2458 * Allow the h/w to stop working completely in case of a real UE.
2459 * Hence hw_error is not set on UE detection.
2460 */
2461 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2462 adapter->hw_error = true;
4bebb56a
SK
2463 /* Do not log error messages if its a FW reset */
2464 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2465 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2466 dev_info(&adapter->pdev->dev,
2467 "Firmware update in progress\n");
2468 return;
2469 } else {
2470 dev_err(&adapter->pdev->dev,
2471 "Error detected in the card\n");
2472 }
f67ef7ba
PR
2473 }
2474
2475 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2476 dev_err(&adapter->pdev->dev,
2477 "ERR: sliport status 0x%x\n", sliport_status);
2478 dev_err(&adapter->pdev->dev,
2479 "ERR: sliport error1 0x%x\n", sliport_err1);
2480 dev_err(&adapter->pdev->dev,
2481 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2482 }
2483
e1cfb67a
PR
2484 if (ue_lo) {
2485 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2486 if (ue_lo & 1)
7c185276
AK
2487 dev_err(&adapter->pdev->dev,
2488 "UE: %s bit set\n", ue_status_low_desc[i]);
2489 }
2490 }
f67ef7ba 2491
e1cfb67a
PR
2492 if (ue_hi) {
2493 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2494 if (ue_hi & 1)
7c185276
AK
2495 dev_err(&adapter->pdev->dev,
2496 "UE: %s bit set\n", ue_status_hi_desc[i]);
2497 }
2498 }
2499
2500}
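
The UE decode loops shift the unmasked status word right one bit at a time and
report the description string for every set bit. A standalone equivalent with a
shortened table (four entries here instead of thirty-two):

	#include <stdio.h>

	static const char * const ue_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

	static void decode_ue(unsigned int ue)
	{
		unsigned int i;

		for (i = 0; ue && i < 4; ue >>= 1, i++)
			if (ue & 1)
				printf("UE: %s bit set\n", ue_desc[i]);
	}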
2501
8d56ff11
SP
2502static void be_msix_disable(struct be_adapter *adapter)
2503{
ac6a0c4a 2504 if (msix_enabled(adapter)) {
8d56ff11 2505 pci_disable_msix(adapter->pdev);
ac6a0c4a 2506 adapter->num_msix_vec = 0;
68d7bdcb 2507 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2508 }
2509}
2510
c2bba3df 2511static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2512{
92bf14ab 2513 int i, status, num_vec;
d379142b 2514 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2515
92bf14ab
SP
2516 /* If RoCE is supported, program the max number of NIC vectors that
2517 * may be configured via set-channels, along with vectors needed for
2518 * RoCE. Else, just program the number we'll use initially.
2519 */
2520 if (be_roce_supported(adapter))
2521 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2522 2 * num_online_cpus());
2523 else
2524 num_vec = adapter->cfg_num_qs;
3abcdeda 2525
ac6a0c4a 2526 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2527 adapter->msix_entries[i].entry = i;
2528
ac6a0c4a 2529 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2530 if (status == 0) {
2531 goto done;
92bf14ab 2532 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2533 num_vec = status;
c2bba3df
SK
2534 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2535 num_vec);
2536 if (!status)
3abcdeda 2537 goto done;
3abcdeda 2538 }
d379142b
SP
2539
2540 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2541
c2bba3df
SK
2542 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2543 if (!be_physfn(adapter))
2544 return status;
2545 return 0;
3abcdeda 2546done:
92bf14ab
SP
2547 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2548 adapter->num_msix_roce_vec = num_vec / 2;
2549 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2550 adapter->num_msix_roce_vec);
2551 }
2552
2553 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2554
2555 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2556 adapter->num_msix_vec);
c2bba3df 2557 return 0;
6b7c5b94
SP
2558}
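
The pci_enable_msix() used here returns 0 on success, a negative errno on hard
failure, or a positive count of the vectors actually available; the driver
treats a positive return as advice and retries once with the smaller count. The
retry shape, generically; request_vectors() is a hypothetical stand-in with the
same return convention:

	static int enable_vectors(int wanted, int min_needed,
				  int (*request_vectors)(int n))
	{
		int ret = request_vectors(wanted);

		if (ret > 0 && ret >= min_needed)
			ret = request_vectors(ret); /* retry with what's there */
		return ret;                         /* 0 on success */
	}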
2559
fe6d2a38 2560static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2561 struct be_eq_obj *eqo)
b628bde2 2562{
f2f781a7 2563 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2564}
6b7c5b94 2565
b628bde2
SP
2566static int be_msix_register(struct be_adapter *adapter)
2567{
10ef9ab4
SP
2568 struct net_device *netdev = adapter->netdev;
2569 struct be_eq_obj *eqo;
2570 int status, i, vec;
6b7c5b94 2571
10ef9ab4
SP
2572 for_all_evt_queues(adapter, eqo, i) {
2573 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2574 vec = be_msix_vec_get(adapter, eqo);
2575 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2576 if (status)
2577 goto err_msix;
2578 }
b628bde2 2579
6b7c5b94 2580 return 0;
3abcdeda 2581err_msix:
10ef9ab4
SP
2582 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2583 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2584 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2585 status);
ac6a0c4a 2586 be_msix_disable(adapter);
6b7c5b94
SP
2587 return status;
2588}
2589
2590static int be_irq_register(struct be_adapter *adapter)
2591{
2592 struct net_device *netdev = adapter->netdev;
2593 int status;
2594
ac6a0c4a 2595 if (msix_enabled(adapter)) {
6b7c5b94
SP
2596 status = be_msix_register(adapter);
2597 if (status == 0)
2598 goto done;
ba343c77
SB
2599 /* INTx is not supported for VF */
2600 if (!be_physfn(adapter))
2601 return status;
6b7c5b94
SP
2602 }
2603
e49cc34f 2604 /* INTx: only the first EQ is used */
6b7c5b94
SP
2605 netdev->irq = adapter->pdev->irq;
2606 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2607 &adapter->eq_obj[0]);
6b7c5b94
SP
2608 if (status) {
2609 dev_err(&adapter->pdev->dev,
2610 "INTx request IRQ failed - err %d\n", status);
2611 return status;
2612 }
2613done:
2614 adapter->isr_registered = true;
2615 return 0;
2616}
2617
2618static void be_irq_unregister(struct be_adapter *adapter)
2619{
2620 struct net_device *netdev = adapter->netdev;
10ef9ab4 2621 struct be_eq_obj *eqo;
3abcdeda 2622 int i;
6b7c5b94
SP
2623
2624 if (!adapter->isr_registered)
2625 return;
2626
2627 /* INTx */
ac6a0c4a 2628 if (!msix_enabled(adapter)) {
e49cc34f 2629 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2630 goto done;
2631 }
2632
2633 /* MSIx */
10ef9ab4
SP
2634 for_all_evt_queues(adapter, eqo, i)
2635 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2636
6b7c5b94
SP
2637done:
2638 adapter->isr_registered = false;
6b7c5b94
SP
2639}
2640
10ef9ab4 2641static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2642{
2643 struct be_queue_info *q;
2644 struct be_rx_obj *rxo;
2645 int i;
2646
2647 for_all_rx_queues(adapter, rxo, i) {
2648 q = &rxo->q;
2649 if (q->created) {
2650 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2651 be_rx_cq_clean(rxo);
482c9e79 2652 }
10ef9ab4 2653 be_queue_free(adapter, q);
482c9e79
SP
2654 }
2655}
2656
889cd4b2
SP
2657static int be_close(struct net_device *netdev)
2658{
2659 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2660 struct be_eq_obj *eqo;
2661 int i;
889cd4b2 2662
045508a8
PP
2663 be_roce_dev_close(adapter);
2664
dff345c5
IV
2665 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2666 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2667 napi_disable(&eqo->napi);
6384a4d0
SP
2668 be_disable_busy_poll(eqo);
2669 }
71237b6f 2670 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2671 }
a323d9bf
SP
2672
2673 be_async_mcc_disable(adapter);
2674
2675 /* Wait for all pending tx completions to arrive so that
2676 * all tx skbs are freed.
2677 */
fba87559 2678 netif_tx_disable(netdev);
6e1f9975 2679 be_tx_compl_clean(adapter);
a323d9bf
SP
2680
2681 be_rx_qs_destroy(adapter);
2682
d11a347d
AK
2683 for (i = 1; i < (adapter->uc_macs + 1); i++)
2684 be_cmd_pmac_del(adapter, adapter->if_handle,
2685 adapter->pmac_id[i], 0);
2686 adapter->uc_macs = 0;
2687
a323d9bf 2688 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2689 if (msix_enabled(adapter))
2690 synchronize_irq(be_msix_vec_get(adapter, eqo));
2691 else
2692 synchronize_irq(netdev->irq);
2693 be_eq_clean(eqo);
63fcb27f
PR
2694 }
2695
889cd4b2
SP
2696 be_irq_unregister(adapter);
2697
482c9e79
SP
2698 return 0;
2699}
2700
10ef9ab4 2701static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2702{
2703 struct be_rx_obj *rxo;
e9008ee9
PR
2704 int rc, i, j;
2705 u8 rsstable[128];
482c9e79
SP
2706
2707 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2708 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2709 sizeof(struct be_eth_rx_d));
2710 if (rc)
2711 return rc;
2712 }
2713
2714 /* The FW would like the default RXQ to be created first */
2715 rxo = default_rxo(adapter);
2716 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2717 adapter->if_handle, false, &rxo->rss_id);
2718 if (rc)
2719 return rc;
2720
2721 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2722 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2723 rx_frag_size, adapter->if_handle,
2724 true, &rxo->rss_id);
482c9e79
SP
2725 if (rc)
2726 return rc;
2727 }
2728
2729 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2730 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2731 for_all_rss_queues(adapter, rxo, i) {
2732 if ((j + i) >= 128)
2733 break;
2734 rsstable[j + i] = rxo->rss_id;
2735 }
2736 }
594ad54a
SR
2737 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2738 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2739
2740 if (!BEx_chip(adapter))
2741 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2742 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2743 } else {
2744 /* Disable RSS, if only default RX Q is created */
2745 adapter->rss_flags = RSS_ENABLE_NONE;
2746 }
594ad54a 2747
da1388d6
VV
2748 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749 128);
2750 if (rc) {
2751 adapter->rss_flags = RSS_ENABLE_NONE;
2752 return rc;
482c9e79
SP
2753 }
2754
2755 /* First time posting */
10ef9ab4 2756 for_all_rx_queues(adapter, rxo, i)
482c9e79 2757 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2758 return 0;
2759}
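
The loop above stripes the rss_id of each RSS ring round-robin across all 128
indirection-table slots, so hashed flows spread evenly over the rings.
Standalone, with example ring ids:

	#include <stdio.h>

	int main(void)
	{
		unsigned char rsstable[128];
		unsigned char rss_id[] = { 3, 4, 5 };  /* example ring ids */
		int nrings = 3, i, j;

		for (j = 0; j < 128; j += nrings)
			for (i = 0; i < nrings && j + i < 128; i++)
				rsstable[j + i] = rss_id[i];

		printf("slot 0 -> ring %u, slot 127 -> ring %u\n",
		       rsstable[0], rsstable[127]);
		return 0;
	}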
2760
6b7c5b94
SP
2761static int be_open(struct net_device *netdev)
2762{
2763 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2764 struct be_eq_obj *eqo;
3abcdeda 2765 struct be_rx_obj *rxo;
10ef9ab4 2766 struct be_tx_obj *txo;
b236916a 2767 u8 link_status;
3abcdeda 2768 int status, i;
5fb379ee 2769
10ef9ab4 2770 status = be_rx_qs_create(adapter);
482c9e79
SP
2771 if (status)
2772 goto err;
2773
c2bba3df
SK
2774 status = be_irq_register(adapter);
2775 if (status)
2776 goto err;
5fb379ee 2777
10ef9ab4 2778 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2779 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2780
10ef9ab4
SP
2781 for_all_tx_queues(adapter, txo, i)
2782 be_cq_notify(adapter, txo->cq.id, true, 0);
2783
7a1e9b20
SP
2784 be_async_mcc_enable(adapter);
2785
10ef9ab4
SP
2786 for_all_evt_queues(adapter, eqo, i) {
2787 napi_enable(&eqo->napi);
6384a4d0 2788 be_enable_busy_poll(eqo);
10ef9ab4
SP
2789 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2790 }
04d3d624 2791 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2792
323ff71e 2793 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2794 if (!status)
2795 be_link_status_update(adapter, link_status);
2796
fba87559 2797 netif_tx_start_all_queues(netdev);
045508a8 2798 be_roce_dev_open(adapter);
889cd4b2
SP
2799 return 0;
2800err:
2801 be_close(adapter->netdev);
2802 return -EIO;
5fb379ee
SP
2803}
2804
71d8d1b5
AK
2805static int be_setup_wol(struct be_adapter *adapter, bool enable)
2806{
2807 struct be_dma_mem cmd;
2808 int status = 0;
2809 u8 mac[ETH_ALEN];
2810
2811 memset(mac, 0, ETH_ALEN);
2812
2813 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2814 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2815 GFP_KERNEL);
71d8d1b5
AK
2816 if (cmd.va == NULL)
2817 return -1;
71d8d1b5
AK
2818
2819 if (enable) {
2820 status = pci_write_config_dword(adapter->pdev,
2821 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2822 if (status) {
2823 dev_err(&adapter->pdev->dev,
2381a55c 2824 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2825 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2826 cmd.dma);
71d8d1b5
AK
2827 return status;
2828 }
2829 status = be_cmd_enable_magic_wol(adapter,
2830 adapter->netdev->dev_addr, &cmd);
2831 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2832 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2833 } else {
2834 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2835 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2836 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2837 }
2838
2b7bcebf 2839 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2840 return status;
2841}
2842
6d87f5c3
AK
2843/*
2844 * Generate a seed MAC address from the PF MAC Address using jhash.
2845 * MAC addresses for VFs are assigned incrementally starting from the seed.
2846 * These addresses are programmed in the ASIC by the PF and the VF driver
2847 * queries for the MAC address during its probe.
2848 */
4c876616 2849static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2850{
f9449ab7 2851 u32 vf;
3abcdeda 2852 int status = 0;
6d87f5c3 2853 u8 mac[ETH_ALEN];
11ac75ed 2854 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2855
2856 be_vf_eth_addr_generate(adapter, mac);
2857
11ac75ed 2858 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2859 if (BEx_chip(adapter))
590c391d 2860 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2861 vf_cfg->if_handle,
2862 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2863 else
2864 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2865 vf + 1);
590c391d 2866
6d87f5c3
AK
2867 if (status)
2868 dev_err(&adapter->pdev->dev,
590c391d 2869 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2870 else
11ac75ed 2871 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2872
2873 mac[5] += 1;
2874 }
2875 return status;
2876}
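
Each VF's MAC is the jhash-derived seed plus its index: the loop above bumps
mac[5] once per VF and, like the sketch below, does not carry into mac[4] on
overflow. The seed value here is an example, not a real assignment:

	#include <stdio.h>

	int main(void)
	{
		unsigned char mac[6] = { 0x02, 0x00, 0xc9, 0x12, 0x34, 0x00 };
		int vf;

		for (vf = 0; vf < 3; vf++) {
			printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
			       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
			mac[5] += 1;    /* no carry, mirroring the driver loop */
		}
		return 0;
	}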
2877
4c876616
SP
2878static int be_vfs_mac_query(struct be_adapter *adapter)
2879{
2880 int status, vf;
2881 u8 mac[ETH_ALEN];
2882 struct be_vf_cfg *vf_cfg;
4c876616
SP
2883
2884 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
2885 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2886 mac, vf_cfg->if_handle,
2887 false, vf+1);
4c876616
SP
2888 if (status)
2889 return status;
2890 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2891 }
2892 return 0;
2893}
2894
f9449ab7 2895static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2896{
11ac75ed 2897 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2898 u32 vf;
2899
257a3feb 2900 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2901 dev_warn(&adapter->pdev->dev,
2902 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2903 goto done;
2904 }
2905
b4c1df93
SP
2906 pci_disable_sriov(adapter->pdev);
2907
11ac75ed 2908 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2909 if (BEx_chip(adapter))
11ac75ed
SP
2910 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2911 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2912 else
2913 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2914 vf + 1);
f9449ab7 2915
11ac75ed
SP
2916 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2917 }
39f1d94d
SP
2918done:
2919 kfree(adapter->vf_cfg);
2920 adapter->num_vfs = 0;
6d87f5c3
AK
2921}
2922
7707133c
SP
2923static void be_clear_queues(struct be_adapter *adapter)
2924{
2925 be_mcc_queues_destroy(adapter);
2926 be_rx_cqs_destroy(adapter);
2927 be_tx_queues_destroy(adapter);
2928 be_evt_queues_destroy(adapter);
2929}
2930
68d7bdcb 2931static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2932{
191eb756
SP
2933 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2934 cancel_delayed_work_sync(&adapter->work);
2935 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2936 }
68d7bdcb
SP
2937}
2938
b05004ad 2939static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
2940{
2941 int i;
2942
b05004ad
SK
2943 if (adapter->pmac_id) {
2944 for (i = 0; i < (adapter->uc_macs + 1); i++)
2945 be_cmd_pmac_del(adapter, adapter->if_handle,
2946 adapter->pmac_id[i], 0);
2947 adapter->uc_macs = 0;
2948
2949 kfree(adapter->pmac_id);
2950 adapter->pmac_id = NULL;
2951 }
2952}
2953
2954static int be_clear(struct be_adapter *adapter)
2955{
68d7bdcb 2956 be_cancel_worker(adapter);
191eb756 2957
11ac75ed 2958 if (sriov_enabled(adapter))
f9449ab7
SP
2959 be_vf_clear(adapter);
2960
2d17f403 2961 /* delete the primary mac along with the uc-mac list */
b05004ad 2962 be_mac_clear(adapter);
fbc13f01 2963
f9449ab7 2964 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 2965
7707133c 2966 be_clear_queues(adapter);
a54769f5 2967
10ef9ab4 2968 be_msix_disable(adapter);
a54769f5
SP
2969 return 0;
2970}
2971
4c876616 2972static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2973{
92bf14ab 2974 struct be_resources res = {0};
4c876616
SP
2975 struct be_vf_cfg *vf_cfg;
2976 u32 cap_flags, en_flags, vf;
922bbe88 2977 int status = 0;
abb93951 2978
4c876616
SP
2979 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2980 BE_IF_FLAGS_MULTICAST;
abb93951 2981
4c876616 2982 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
2983 if (!BE3_chip(adapter)) {
2984 status = be_cmd_get_profile_config(adapter, &res,
2985 vf + 1);
2986 if (!status)
2987 cap_flags = res.if_cap_flags;
2988 }
4c876616
SP
2989
2990 /* If a FW profile exists, then cap_flags are updated */
2991 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2992 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2993 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2994 &vf_cfg->if_handle, vf + 1);
2995 if (status)
2996 goto err;
2997 }
2998err:
2999 return status;
abb93951
PR
3000}
3001
39f1d94d 3002static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3003{
11ac75ed 3004 struct be_vf_cfg *vf_cfg;
30128031
SP
3005 int vf;
3006
39f1d94d
SP
3007 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3008 GFP_KERNEL);
3009 if (!adapter->vf_cfg)
3010 return -ENOMEM;
3011
11ac75ed
SP
3012 for_all_vfs(adapter, vf_cfg, vf) {
3013 vf_cfg->if_handle = -1;
3014 vf_cfg->pmac_id = -1;
30128031 3015 }
39f1d94d 3016 return 0;
30128031
SP
3017}
3018
f9449ab7
SP
3019static int be_vf_setup(struct be_adapter *adapter)
3020{
11ac75ed 3021 struct be_vf_cfg *vf_cfg;
f1f3ee1b 3022 u16 def_vlan, lnk_speed;
4c876616
SP
3023 int status, old_vfs, vf;
3024 struct device *dev = &adapter->pdev->dev;
04a06028 3025 u32 privileges;
39f1d94d 3026
257a3feb 3027 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3028 if (old_vfs) {
3029 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3030 if (old_vfs != num_vfs)
3031 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3032 adapter->num_vfs = old_vfs;
39f1d94d 3033 } else {
92bf14ab 3034 if (num_vfs > be_max_vfs(adapter))
4c876616 3035 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3036 be_max_vfs(adapter), num_vfs);
3037 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3038 if (!adapter->num_vfs)
4c876616 3039 return 0;
39f1d94d
SP
3040 }
3041
3042 status = be_vf_setup_init(adapter);
3043 if (status)
3044 goto err;
30128031 3045
4c876616
SP
3046 if (old_vfs) {
3047 for_all_vfs(adapter, vf_cfg, vf) {
3048 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3049 if (status)
3050 goto err;
3051 }
3052 } else {
3053 status = be_vfs_if_create(adapter);
f9449ab7
SP
3054 if (status)
3055 goto err;
f9449ab7
SP
3056 }
3057
4c876616
SP
3058 if (old_vfs) {
3059 status = be_vfs_mac_query(adapter);
3060 if (status)
3061 goto err;
3062 } else {
39f1d94d
SP
3063 status = be_vf_eth_addr_config(adapter);
3064 if (status)
3065 goto err;
3066 }
f9449ab7 3067
11ac75ed 3068 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3069 /* Allow VFs to program MAC/VLAN filters */
3070 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3071 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3072 status = be_cmd_set_fn_privileges(adapter,
3073 privileges |
3074 BE_PRIV_FILTMGMT,
3075 vf + 1);
3076 if (!status)
3077 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3078 vf);
3079 }
3080
4c876616
SP
3081 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3082 * Allow the full available bandwidth.
3083 */
3084 if (BE3_chip(adapter) && !old_vfs)
3085 be_cmd_set_qos(adapter, 1000, vf+1);
3086
3087 status = be_cmd_link_status_query(adapter, &lnk_speed,
3088 NULL, vf + 1);
3089 if (!status)
3090 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
3091
3092 status = be_cmd_get_hsw_config(adapter, &def_vlan,
a77dcb8c 3093 vf + 1, vf_cfg->if_handle, NULL);
f1f3ee1b
AK
3094 if (status)
3095 goto err;
3096 vf_cfg->def_vid = def_vlan;
dcf7ebba 3097
0599863d
VV
3098 if (!old_vfs)
3099 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 3100 }
b4c1df93
SP
3101
3102 if (!old_vfs) {
3103 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3104 if (status) {
3105 dev_err(dev, "SRIOV enable failed\n");
3106 adapter->num_vfs = 0;
3107 goto err;
3108 }
3109 }
f9449ab7
SP
3110 return 0;
3111err:
4c876616
SP
3112 dev_err(dev, "VF setup failed\n");
3113 be_vf_clear(adapter);
f9449ab7
SP
3114 return status;
3115}
3116
92bf14ab
SP
3117 /* On BE2/BE3 the FW does not report the supported limits */
3118static void BEx_get_resources(struct be_adapter *adapter,
3119 struct be_resources *res)
3120{
3121 struct pci_dev *pdev = adapter->pdev;
3122 bool use_sriov = false;
e3dc867c 3123 int max_vfs;
92bf14ab 3124
e3dc867c 3125 max_vfs = pci_sriov_get_totalvfs(pdev);
92bf14ab 3126
e3dc867c 3127 if (BE3_chip(adapter) && sriov_want(adapter)) {
92bf14ab 3128 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
b905b5d4 3129 use_sriov = res->max_vfs;
92bf14ab
SP
3130 }
3131
3132 if (be_physfn(adapter))
3133 res->max_uc_mac = BE_UC_PMAC_COUNT;
3134 else
3135 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3136
3137 if (adapter->function_mode & FLEX10_MODE)
3138 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
1aa9673c
AK
3139 else if (adapter->function_mode & UMC_ENABLED)
3140 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
92bf14ab
SP
3141 else
3142 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3143 res->max_mcast_mac = BE_MAX_MC;
3144
30f3fe45 3145 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
92bf14ab 3146 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
30f3fe45 3147 !be_physfn(adapter) || (adapter->port_num > 1))
92bf14ab
SP
3148 res->max_tx_qs = 1;
3149 else
3150 res->max_tx_qs = BE3_MAX_TX_QS;
3151
3152 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3153 !use_sriov && be_physfn(adapter))
3154 res->max_rss_qs = (adapter->be3_native) ?
3155 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3156 res->max_rx_qs = res->max_rss_qs + 1;
3157
e3dc867c
SR
3158 if (be_physfn(adapter))
3159 res->max_evt_qs = (max_vfs > 0) ?
3160 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3161 else
3162 res->max_evt_qs = 1;
92bf14ab
SP
3163
3164 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3166 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3167}
3168
30128031
SP
3169static void be_setup_init(struct be_adapter *adapter)
3170{
3171 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3172 adapter->phy.link_speed = -1;
30128031
SP
3173 adapter->if_handle = -1;
3174 adapter->be3_native = false;
3175 adapter->promiscuous = false;
f25b119c
PR
3176 if (be_physfn(adapter))
3177 adapter->cmd_privileges = MAX_PRIVILEGES;
3178 else
3179 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3180}
3181
92bf14ab 3182static int be_get_resources(struct be_adapter *adapter)
abb93951 3183{
92bf14ab
SP
3184 struct device *dev = &adapter->pdev->dev;
3185 struct be_resources res = {0};
3186 int status;
abb93951 3187
92bf14ab
SP
3188 if (BEx_chip(adapter)) {
3189 BEx_get_resources(adapter, &res);
3190 adapter->res = res;
abb93951
PR
3191 }
3192
92bf14ab
SP
3193 /* For Lancer, SH etc., read per-function resource limits from FW.
3194 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3195 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3196 */
3197 if (!BEx_chip(adapter)) {
3198 status = be_cmd_get_func_config(adapter, &res);
3199 if (status)
3200 return status;
abb93951 3201
92bf14ab
SP
3202 /* If RoCE may be enabled stash away half the EQs for RoCE */
3203 if (be_roce_supported(adapter))
3204 res.max_evt_qs /= 2;
3205 adapter->res = res;
abb93951 3206
92bf14ab
SP
3207 if (be_physfn(adapter)) {
3208 status = be_cmd_get_profile_config(adapter, &res, 0);
3209 if (status)
3210 return status;
3211 adapter->res.max_vfs = res.max_vfs;
3212 }
abb93951 3213
92bf14ab
SP
3214 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3215 be_max_txqs(adapter), be_max_rxqs(adapter),
3216 be_max_rss(adapter), be_max_eqs(adapter),
3217 be_max_vfs(adapter));
3218 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3219 be_max_uc(adapter), be_max_mc(adapter),
3220 be_max_vlans(adapter));
abb93951 3221 }
4c876616 3222
92bf14ab 3223 return 0;
abb93951
PR
3224}
3225
39f1d94d
SP
3226/* Routine to query per function resource limits */
3227static int be_get_config(struct be_adapter *adapter)
3228{
542963b7 3229 u16 profile_id;
4c876616 3230 int status;
39f1d94d 3231
abb93951
PR
3232 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3233 &adapter->function_mode,
0ad3157e
VV
3234 &adapter->function_caps,
3235 &adapter->asic_rev);
abb93951 3236 if (status)
92bf14ab 3237 return status;
abb93951 3238
542963b7
VV
3239 if (be_physfn(adapter)) {
3240 status = be_cmd_get_active_profile(adapter, &profile_id);
3241 if (!status)
3242 dev_info(&adapter->pdev->dev,
3243 "Using profile 0x%x\n", profile_id);
3244 }
3245
92bf14ab
SP
3246 status = be_get_resources(adapter);
3247 if (status)
3248 return status;
abb93951
PR
3249
3250 /* primary mac needs 1 pmac entry */
92bf14ab
SP
3251 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3252 GFP_KERNEL);
3253 if (!adapter->pmac_id)
3254 return -ENOMEM;
abb93951 3255
92bf14ab
SP
3256 /* Sanitize cfg_num_qs based on HW and platform limits */
3257 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3258
3259 return 0;
39f1d94d
SP
3260}
3261
95046b92
SP
3262static int be_mac_setup(struct be_adapter *adapter)
3263{
3264 u8 mac[ETH_ALEN];
3265 int status;
3266
3267 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3268 status = be_cmd_get_perm_mac(adapter, mac);
3269 if (status)
3270 return status;
3271
3272 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3273 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3274 } else {
3275 /* Maybe the HW was reset; dev_addr must be re-programmed */
3276 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3277 }
3278
2c7a9dc1
AK
3279 /* For BE3-R VFs, the PF programs the initial MAC address */
3280 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3281 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3282 &adapter->pmac_id[0], 0);
95046b92
SP
3283 return 0;
3284}
3285
68d7bdcb
SP
3286static void be_schedule_worker(struct be_adapter *adapter)
3287{
3288 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3289 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3290}
3291
7707133c 3292static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3293{
68d7bdcb 3294 struct net_device *netdev = adapter->netdev;
10ef9ab4 3295 int status;
ba343c77 3296
7707133c 3297 status = be_evt_queues_create(adapter);
abb93951
PR
3298 if (status)
3299 goto err;
73d540f2 3300
7707133c 3301 status = be_tx_qs_create(adapter);
c2bba3df
SK
3302 if (status)
3303 goto err;
10ef9ab4 3304
7707133c 3305 status = be_rx_cqs_create(adapter);
10ef9ab4 3306 if (status)
a54769f5 3307 goto err;
6b7c5b94 3308
7707133c 3309 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3310 if (status)
3311 goto err;
3312
68d7bdcb
SP
3313 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3314 if (status)
3315 goto err;
3316
3317 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3318 if (status)
3319 goto err;
3320
7707133c
SP
3321 return 0;
3322err:
3323 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3324 return status;
3325}
3326
68d7bdcb
SP
3327int be_update_queues(struct be_adapter *adapter)
3328{
3329 struct net_device *netdev = adapter->netdev;
3330 int status;
3331
3332 if (netif_running(netdev))
3333 be_close(netdev);
3334
3335 be_cancel_worker(adapter);
3336
3337 /* If any vectors have been shared with RoCE we cannot re-program
3338 * the MSIx table.
3339 */
3340 if (!adapter->num_msix_roce_vec)
3341 be_msix_disable(adapter);
3342
3343 be_clear_queues(adapter);
3344
3345 if (!msix_enabled(adapter)) {
3346 status = be_msix_enable(adapter);
3347 if (status)
3348 return status;
3349 }
3350
3351 status = be_setup_queues(adapter);
3352 if (status)
3353 return status;
3354
3355 be_schedule_worker(adapter);
3356
3357 if (netif_running(netdev))
3358 status = be_open(netdev);
3359
3360 return status;
3361}
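
/* Illustrative sketch, assuming an ethtool-style caller (the real one,
 * if any, lives outside this file): be_update_queues() is built for
 * run-time queue re-sizing -- close, tear down queues, optionally
 * re-program MSI-x, re-create, re-open. A minimal caller could be:
 */
static int example_set_channels(struct net_device *netdev,
				struct ethtool_channels *ch)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* A real handler would validate ch->combined_count first */
	adapter->cfg_num_qs = ch->combined_count;
	return be_update_queues(adapter);
}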
3362
7707133c
SP
3363static int be_setup(struct be_adapter *adapter)
3364{
3365 struct device *dev = &adapter->pdev->dev;
3366 u32 tx_fc, rx_fc, en_flags;
3367 int status;
3368
3369 be_setup_init(adapter);
3370
3371 if (!lancer_chip(adapter))
3372 be_cmd_req_native_mode(adapter);
3373
3374 status = be_get_config(adapter);
10ef9ab4 3375 if (status)
a54769f5 3376 goto err;
6b7c5b94 3377
7707133c 3378 status = be_msix_enable(adapter);
10ef9ab4 3379 if (status)
a54769f5 3380 goto err;
6b7c5b94 3381
f9449ab7 3382 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3383 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3384 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3385 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3386 en_flags = en_flags & be_if_cap_flags(adapter);
3387 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3388 &adapter->if_handle, 0);
7707133c 3389 if (status)
a54769f5 3390 goto err;
6b7c5b94 3391
68d7bdcb
SP
3392 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3393 rtnl_lock();
7707133c 3394 status = be_setup_queues(adapter);
68d7bdcb 3395 rtnl_unlock();
95046b92 3396 if (status)
1578e777
PR
3397 goto err;
3398
7707133c 3399 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3400
3401 status = be_mac_setup(adapter);
10ef9ab4
SP
3402 if (status)
3403 goto err;
3404
eeb65ced 3405 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3406
e9e2a904
SK
3407 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3408 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3409 adapter->fw_ver);
3410 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3411 }
3412
1d1e9a46 3413 if (adapter->vlans_added)
10329df8 3414 be_vid_config(adapter);
7ab8b0b4 3415
a54769f5 3416 be_set_rx_mode(adapter->netdev);
5fb379ee 3417
76a9e08e
SR
3418 be_cmd_get_acpi_wol_cap(adapter);
3419
ddc3f5cb 3420 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3421
ddc3f5cb
AK
3422 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3423 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3424 adapter->rx_fc);
2dc1deb6 3425
b905b5d4 3426 if (sriov_want(adapter)) {
92bf14ab 3427 if (be_max_vfs(adapter))
39f1d94d
SP
3428 be_vf_setup(adapter);
3429 else
3430 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3431 }
3432
f25b119c
PR
3433 status = be_cmd_get_phy_info(adapter);
3434 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3435 adapter->phy.fc_autoneg = 1;
3436
68d7bdcb 3437 be_schedule_worker(adapter);
f9449ab7 3438 return 0;
a54769f5
SP
3439err:
3440 be_clear(adapter);
3441 return status;
3442}
6b7c5b94 3443
66268739
IV
3444#ifdef CONFIG_NET_POLL_CONTROLLER
3445static void be_netpoll(struct net_device *netdev)
3446{
3447 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3448 struct be_eq_obj *eqo;
66268739
IV
3449 int i;
3450
e49cc34f
SP
3451 for_all_evt_queues(adapter, eqo, i) {
3452 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3453 napi_schedule(&eqo->napi);
3454 }
10ef9ab4
SP
3455
3456 return;
66268739
IV
3457}
3458#endif
3459
84517482 3460#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3461static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3462
fa9a6fed 3463static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3464 const u8 *p, u32 img_start, int image_size,
3465 int hdr_size)
fa9a6fed
SB
3466{
3467 u32 crc_offset;
3468 u8 flashed_crc[4];
3469 int status;
3f0d4560
AK
3470
3471 crc_offset = hdr_size + img_start + image_size - 4;
3472
fa9a6fed 3473 p += crc_offset;
3f0d4560
AK
3474
3475 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3476 (image_size - 4));
fa9a6fed
SB
3477 if (status) {
3478 dev_err(&adapter->pdev->dev,
3479 "could not get crc from flash, not flashing redboot\n");
3480 return false;
3481 }
3482
3483 	/* Update redboot only if the CRC does not match */
3484 if (!memcmp(flashed_crc, p, 4))
3485 return false;
3486 else
3487 return true;
fa9a6fed
SB
3488}
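
/* Illustrative sketch, not part of the driver: the decision above boils
 * down to a 4-byte compare between the CRC already on flash and the CRC
 * trailing the candidate image (at hdr_size + img_start + image_size - 4);
 * redboot is re-flashed only on a mismatch.
 */
static inline bool redboot_crc_differs(const u8 *flashed, const u8 *in_img)
{
	return memcmp(flashed, in_img, 4) != 0;
}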
3489
306f1348
SP
3490static bool phy_flashing_required(struct be_adapter *adapter)
3491{
42f11cf2
AK
3492 return (adapter->phy.phy_type == TN_8022 &&
3493 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3494}
3495
c165541e
PR
3496static bool is_comp_in_ufi(struct be_adapter *adapter,
3497 struct flash_section_info *fsec, int type)
3498{
3499 int i = 0, img_type = 0;
3500 struct flash_section_info_g2 *fsec_g2 = NULL;
3501
ca34fe38 3502 if (BE2_chip(adapter))
c165541e
PR
3503 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3504
3505 for (i = 0; i < MAX_FLASH_COMP; i++) {
3506 if (fsec_g2)
3507 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3508 else
3509 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3510
3511 if (img_type == type)
3512 return true;
3513 }
3514 return false;
3515
3516}
3517
4188e7df 3518static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3519 int header_size,
3520 const struct firmware *fw)
3521{
3522 struct flash_section_info *fsec = NULL;
3523 const u8 *p = fw->data;
3524
3525 p += header_size;
3526 while (p < (fw->data + fw->size)) {
3527 fsec = (struct flash_section_info *)p;
3528 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3529 return fsec;
3530 p += 32;
3531 }
3532 return NULL;
3533}
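
/* Note on the scan above: the flash section directory is located by
 * walking the UFI in 32-byte steps past the file and image headers and
 * comparing against the two-part flash_cookie; a NULL return means the
 * image carries no recognizable flash directory.
 */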
3534
773a2d7c
PR
3535static int be_flash(struct be_adapter *adapter, const u8 *img,
3536 struct be_dma_mem *flash_cmd, int optype, int img_size)
3537{
3538 u32 total_bytes = 0, flash_op, num_bytes = 0;
3539 int status = 0;
3540 struct be_cmd_write_flashrom *req = flash_cmd->va;
3541
3542 total_bytes = img_size;
3543 while (total_bytes) {
3544 num_bytes = min_t(u32, 32*1024, total_bytes);
3545
3546 total_bytes -= num_bytes;
3547
3548 if (!total_bytes) {
3549 if (optype == OPTYPE_PHY_FW)
3550 flash_op = FLASHROM_OPER_PHY_FLASH;
3551 else
3552 flash_op = FLASHROM_OPER_FLASH;
3553 } else {
3554 if (optype == OPTYPE_PHY_FW)
3555 flash_op = FLASHROM_OPER_PHY_SAVE;
3556 else
3557 flash_op = FLASHROM_OPER_SAVE;
3558 }
3559
be716446 3560 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3561 img += num_bytes;
3562 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3563 flash_op, num_bytes);
3564 if (status) {
3565 if (status == ILLEGAL_IOCTL_REQ &&
3566 optype == OPTYPE_PHY_FW)
3567 break;
3568 dev_err(&adapter->pdev->dev,
3569 "cmd to write to flash rom failed.\n");
3570 return status;
3571 }
3572 }
3573 return 0;
3574}
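
/* Worked example of the chunking above: a 100 KB (102400 byte) image is
 * written as three 32768-byte chunks with FLASHROM_OPER_SAVE followed by
 * one final 4096-byte chunk with FLASHROM_OPER_FLASH; only the last
 * chunk carries the commit opcode (PHY images use the _PHY_ variants).
 */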
3575
0ad3157e 3576/* For BE2, BE3 and BE3-R */
ca34fe38 3577static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3578 const struct firmware *fw,
3579 struct be_dma_mem *flash_cmd,
3580 int num_of_images)
3f0d4560 3581
84517482 3582{
3f0d4560 3583 int status = 0, i, filehdr_size = 0;
c165541e 3584 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3585 const u8 *p = fw->data;
215faf9c 3586 const struct flash_comp *pflashcomp;
773a2d7c 3587 int num_comp, redboot;
c165541e
PR
3588 struct flash_section_info *fsec = NULL;
3589
3590 struct flash_comp gen3_flash_types[] = {
3591 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3592 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3593 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3594 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3595 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3596 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3597 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3598 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3599 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3600 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3601 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3602 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3603 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3604 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3605 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3606 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3607 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3608 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3609 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3610 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3611 };
c165541e
PR
3612
3613 struct flash_comp gen2_flash_types[] = {
3614 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3615 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3616 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3617 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3618 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3619 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3620 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3621 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3622 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3623 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3624 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3625 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3626 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3627 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3628 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3629 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3630 };
3631
ca34fe38 3632 if (BE3_chip(adapter)) {
3f0d4560
AK
3633 pflashcomp = gen3_flash_types;
3634 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3635 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3636 } else {
3637 pflashcomp = gen2_flash_types;
3638 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3639 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3640 }
ca34fe38 3641
c165541e
PR
3642 /* Get flash section info*/
3643 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3644 if (!fsec) {
3645 dev_err(&adapter->pdev->dev,
3646 "Invalid Cookie. UFI corrupted ?\n");
3647 return -1;
3648 }
9fe96934 3649 for (i = 0; i < num_comp; i++) {
c165541e 3650 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3651 continue;
c165541e
PR
3652
3653 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3654 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3655 continue;
3656
773a2d7c
PR
3657 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3658 !phy_flashing_required(adapter))
306f1348 3659 continue;
c165541e 3660
773a2d7c
PR
3661 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3662 redboot = be_flash_redboot(adapter, fw->data,
3663 pflashcomp[i].offset, pflashcomp[i].size,
3664 filehdr_size + img_hdrs_size);
3665 if (!redboot)
3666 continue;
3667 }
c165541e 3668
3f0d4560 3669 p = fw->data;
c165541e 3670 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3671 if (p + pflashcomp[i].size > fw->data + fw->size)
3672 return -1;
773a2d7c
PR
3673
3674 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3675 pflashcomp[i].size);
3676 if (status) {
3677 dev_err(&adapter->pdev->dev,
3678 "Flashing section type %d failed.\n",
3679 pflashcomp[i].img_type);
3680 return status;
84517482 3681 }
84517482 3682 }
84517482
AK
3683 return 0;
3684}
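
/* Note: be_flash_BEx() is table-driven -- each flash_comp entry pairs a
 * fixed flash offset and max size with the UFI image type allowed there,
 * so supporting a new component is a table edit, not new control flow.
 */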
3685
773a2d7c
PR
3686static int be_flash_skyhawk(struct be_adapter *adapter,
3687 const struct firmware *fw,
3688 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3689{
773a2d7c
PR
3690 int status = 0, i, filehdr_size = 0;
3691 int img_offset, img_size, img_optype, redboot;
3692 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3693 const u8 *p = fw->data;
3694 struct flash_section_info *fsec = NULL;
3695
3696 filehdr_size = sizeof(struct flash_file_hdr_g3);
3697 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3698 if (!fsec) {
3699 dev_err(&adapter->pdev->dev,
3700 "Invalid Cookie. UFI corrupted ?\n");
3701 return -1;
3702 }
3703
3704 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3705 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3706 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3707
3708 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3709 case IMAGE_FIRMWARE_iSCSI:
3710 img_optype = OPTYPE_ISCSI_ACTIVE;
3711 break;
3712 case IMAGE_BOOT_CODE:
3713 img_optype = OPTYPE_REDBOOT;
3714 break;
3715 case IMAGE_OPTION_ROM_ISCSI:
3716 img_optype = OPTYPE_BIOS;
3717 break;
3718 case IMAGE_OPTION_ROM_PXE:
3719 img_optype = OPTYPE_PXE_BIOS;
3720 break;
3721 case IMAGE_OPTION_ROM_FCoE:
3722 img_optype = OPTYPE_FCOE_BIOS;
3723 break;
3724 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3725 img_optype = OPTYPE_ISCSI_BACKUP;
3726 break;
3727 case IMAGE_NCSI:
3728 img_optype = OPTYPE_NCSI_FW;
3729 break;
3730 default:
3731 continue;
3732 }
3733
3734 if (img_optype == OPTYPE_REDBOOT) {
3735 redboot = be_flash_redboot(adapter, fw->data,
3736 img_offset, img_size,
3737 filehdr_size + img_hdrs_size);
3738 if (!redboot)
3739 continue;
3740 }
3741
3742 p = fw->data;
3743 p += filehdr_size + img_offset + img_hdrs_size;
3744 if (p + img_size > fw->data + fw->size)
3745 return -1;
3746
3747 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3748 if (status) {
3749 dev_err(&adapter->pdev->dev,
3750 "Flashing section type %d failed.\n",
3751 				le32_to_cpu(fsec->fsec_entry[i].type));
3752 return status;
3753 }
3754 }
3755 return 0;
3f0d4560
AK
3756}
3757
485bf569
SN
3758static int lancer_fw_download(struct be_adapter *adapter,
3759 const struct firmware *fw)
84517482 3760{
485bf569
SN
3761#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3762#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3763 struct be_dma_mem flash_cmd;
485bf569
SN
3764 const u8 *data_ptr = NULL;
3765 u8 *dest_image_ptr = NULL;
3766 size_t image_size = 0;
3767 u32 chunk_size = 0;
3768 u32 data_written = 0;
3769 u32 offset = 0;
3770 int status = 0;
3771 u8 add_status = 0;
f67ef7ba 3772 u8 change_status;
84517482 3773
485bf569 3774 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3775 dev_err(&adapter->pdev->dev,
485bf569
SN
3776 "FW Image not properly aligned. "
3777 "Length must be 4 byte aligned.\n");
3778 status = -EINVAL;
3779 goto lancer_fw_exit;
d9efd2af
SB
3780 }
3781
485bf569
SN
3782 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3783 + LANCER_FW_DOWNLOAD_CHUNK;
3784 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3785 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3786 if (!flash_cmd.va) {
3787 status = -ENOMEM;
485bf569
SN
3788 goto lancer_fw_exit;
3789 }
84517482 3790
485bf569
SN
3791 dest_image_ptr = flash_cmd.va +
3792 sizeof(struct lancer_cmd_req_write_object);
3793 image_size = fw->size;
3794 data_ptr = fw->data;
3795
3796 while (image_size) {
3797 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3798
3799 /* Copy the image chunk content. */
3800 memcpy(dest_image_ptr, data_ptr, chunk_size);
3801
3802 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3803 chunk_size, offset,
3804 LANCER_FW_DOWNLOAD_LOCATION,
3805 &data_written, &change_status,
3806 &add_status);
485bf569
SN
3807 if (status)
3808 break;
3809
3810 offset += data_written;
3811 data_ptr += data_written;
3812 image_size -= data_written;
3813 }
3814
3815 if (!status) {
3816 /* Commit the FW written */
3817 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3818 0, offset,
3819 LANCER_FW_DOWNLOAD_LOCATION,
3820 &data_written, &change_status,
3821 &add_status);
485bf569
SN
3822 }
3823
3824 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3825 flash_cmd.dma);
3826 if (status) {
3827 dev_err(&adapter->pdev->dev,
3828 "Firmware load error. "
3829 "Status code: 0x%x Additional Status: 0x%x\n",
3830 status, add_status);
3831 goto lancer_fw_exit;
3832 }
3833
f67ef7ba 3834 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3835 dev_info(&adapter->pdev->dev,
3836 "Resetting adapter to activate new FW\n");
5c510811
SK
3837 status = lancer_physdev_ctrl(adapter,
3838 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3839 if (status) {
3840 dev_err(&adapter->pdev->dev,
3841 "Adapter busy for FW reset.\n"
3842 "New FW will not be active.\n");
3843 goto lancer_fw_exit;
3844 }
3845 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3846 dev_err(&adapter->pdev->dev,
3847 "System reboot required for new FW"
3848 " to be active\n");
3849 }
3850
485bf569
SN
3851 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3852lancer_fw_exit:
3853 return status;
3854}
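
/* Note on the protocol above: data is streamed to
 * LANCER_FW_DOWNLOAD_LOCATION ("/prg") in chunks of up to 32 KB, and the
 * trailing zero-length lancer_cmd_write_object() at the accumulated
 * offset is the commit step that asks the FW to validate and activate
 * the downloaded image.
 */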
3855
ca34fe38
SP
3856#define UFI_TYPE2 2
3857#define UFI_TYPE3 3
0ad3157e 3858#define UFI_TYPE3R 10
ca34fe38
SP
3859#define UFI_TYPE4 4
3860static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3861 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3862{
3863 if (fhdr == NULL)
3864 goto be_get_ufi_exit;
3865
ca34fe38
SP
3866 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3867 return UFI_TYPE4;
0ad3157e
VV
3868 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3869 if (fhdr->asic_type_rev == 0x10)
3870 return UFI_TYPE3R;
3871 else
3872 return UFI_TYPE3;
3873 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3874 return UFI_TYPE2;
773a2d7c
PR
3875
3876be_get_ufi_exit:
3877 dev_err(&adapter->pdev->dev,
3878 "UFI and Interface are not compatible for flashing\n");
3879 return -1;
3880}
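
/* Summary of the mapping above (build[0] comes from the UFI header):
 *   '4' on Skyhawk             -> UFI_TYPE4
 *   '3' on BE3, asic rev 0x10  -> UFI_TYPE3R
 *   '3' on BE3, other revs     -> UFI_TYPE3
 *   '2' on BE2                 -> UFI_TYPE2
 *   anything else              -> -1 (UFI/interface mismatch)
 */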
3881
485bf569
SN
3882 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3883{
485bf569
SN
3884 struct flash_file_hdr_g3 *fhdr3;
3885 struct image_hdr *img_hdr_ptr = NULL;
3886 struct be_dma_mem flash_cmd;
3887 const u8 *p;
773a2d7c 3888 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3889
be716446 3890 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3891 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3892 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3893 if (!flash_cmd.va) {
3894 status = -ENOMEM;
485bf569 3895 goto be_fw_exit;
84517482
AK
3896 }
3897
773a2d7c 3898 p = fw->data;
0ad3157e 3899 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3900
0ad3157e 3901 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3902
773a2d7c
PR
3903 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3904 for (i = 0; i < num_imgs; i++) {
3905 img_hdr_ptr = (struct image_hdr *)(fw->data +
3906 (sizeof(struct flash_file_hdr_g3) +
3907 i * sizeof(struct image_hdr)));
3908 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3909 switch (ufi_type) {
3910 case UFI_TYPE4:
773a2d7c
PR
3911 status = be_flash_skyhawk(adapter, fw,
3912 &flash_cmd, num_imgs);
0ad3157e
VV
3913 break;
3914 case UFI_TYPE3R:
ca34fe38
SP
3915 status = be_flash_BEx(adapter, fw, &flash_cmd,
3916 num_imgs);
0ad3157e
VV
3917 break;
3918 case UFI_TYPE3:
3919 			/* Do not flash this UFI on BE3-R cards */
3920 if (adapter->asic_rev < 0x10)
3921 status = be_flash_BEx(adapter, fw,
3922 &flash_cmd,
3923 num_imgs);
3924 else {
3925 status = -1;
3926 dev_err(&adapter->pdev->dev,
3927 "Can't load BE3 UFI on BE3R\n");
3928 }
3929 }
3f0d4560 3930 }
773a2d7c
PR
3931 }
3932
ca34fe38
SP
3933 if (ufi_type == UFI_TYPE2)
3934 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3935 else if (ufi_type == -1)
3f0d4560 3936 status = -1;
84517482 3937
2b7bcebf
IV
3938 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3939 flash_cmd.dma);
84517482
AK
3940 if (status) {
3941 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3942 goto be_fw_exit;
84517482
AK
3943 }
3944
af901ca1 3945 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3946
485bf569
SN
3947be_fw_exit:
3948 return status;
3949}
3950
3951int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3952{
3953 const struct firmware *fw;
3954 int status;
3955
3956 if (!netif_running(adapter->netdev)) {
3957 dev_err(&adapter->pdev->dev,
3958 "Firmware load not allowed (interface is down)\n");
3959 return -1;
3960 }
3961
3962 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3963 if (status)
3964 goto fw_exit;
3965
3966 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3967
3968 if (lancer_chip(adapter))
3969 status = lancer_fw_download(adapter, fw);
3970 else
3971 status = be_fw_download(adapter, fw);
3972
eeb65ced
SK
3973 if (!status)
3974 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3975 adapter->fw_on_flash);
3976
84517482
AK
3977fw_exit:
3978 release_firmware(fw);
3979 return status;
3980}
3981
a77dcb8c
AK
3982static int be_ndo_bridge_setlink(struct net_device *dev,
3983 struct nlmsghdr *nlh)
3984{
3985 struct be_adapter *adapter = netdev_priv(dev);
3986 struct nlattr *attr, *br_spec;
3987 int rem;
3988 int status = 0;
3989 u16 mode = 0;
3990
3991 if (!sriov_enabled(adapter))
3992 return -EOPNOTSUPP;
3993
3994 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3995
3996 nla_for_each_nested(attr, br_spec, rem) {
3997 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3998 continue;
3999
4000 mode = nla_get_u16(attr);
4001 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4002 return -EINVAL;
4003
4004 status = be_cmd_set_hsw_config(adapter, 0, 0,
4005 adapter->if_handle,
4006 mode == BRIDGE_MODE_VEPA ?
4007 PORT_FWD_TYPE_VEPA :
4008 PORT_FWD_TYPE_VEB);
4009 if (status)
4010 goto err;
4011
4012 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4013 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4014
4015 return status;
4016 }
4017err:
4018 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4019 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4020
4021 return status;
4022}
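
/* Usage note (an assumption about tooling, not taken from this file):
 * this handler is reached via RTM_SETLINK carrying a nested
 * IFLA_AF_SPEC/IFLA_BRIDGE_MODE attribute, which iproute2 generates for
 * e.g.:
 *
 *	bridge link set dev <pf-netdev> hwmode vepa
 *	bridge link set dev <pf-netdev> hwmode veb
 */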
4023
4024static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4025 struct net_device *dev,
4026 u32 filter_mask)
4027{
4028 struct be_adapter *adapter = netdev_priv(dev);
4029 int status = 0;
4030 u8 hsw_mode;
4031
4032 if (!sriov_enabled(adapter))
4033 return 0;
4034
4035 /* BE and Lancer chips support VEB mode only */
4036 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4037 hsw_mode = PORT_FWD_TYPE_VEB;
4038 } else {
4039 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4040 adapter->if_handle, &hsw_mode);
4041 if (status)
4042 return 0;
4043 }
4044
4045 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4046 hsw_mode == PORT_FWD_TYPE_VEPA ?
4047 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4048}
4049
e5686ad8 4050static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4051 .ndo_open = be_open,
4052 .ndo_stop = be_close,
4053 .ndo_start_xmit = be_xmit,
a54769f5 4054 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4055 .ndo_set_mac_address = be_mac_addr_set,
4056 .ndo_change_mtu = be_change_mtu,
ab1594e9 4057 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4058 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4059 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4060 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4061 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4062 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4063 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
4064 .ndo_get_vf_config = be_get_vf_config,
4065#ifdef CONFIG_NET_POLL_CONTROLLER
4066 .ndo_poll_controller = be_netpoll,
4067#endif
a77dcb8c
AK
4068 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4069 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0
SP
4070#ifdef CONFIG_NET_RX_BUSY_POLL
4071 .ndo_busy_poll = be_busy_poll
4072#endif
6b7c5b94
SP
4073};
4074
4075static void be_netdev_init(struct net_device *netdev)
4076{
4077 struct be_adapter *adapter = netdev_priv(netdev);
4078
6332c8d3 4079 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4080 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4081 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4082 if (be_multi_rxq(adapter))
4083 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4084
4085 netdev->features |= netdev->hw_features |
f646968f 4086 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4087
eb8a50d9 4088 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4089 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4090
fbc13f01
AK
4091 netdev->priv_flags |= IFF_UNICAST_FLT;
4092
6b7c5b94
SP
4093 netdev->flags |= IFF_MULTICAST;
4094
b7e5887e 4095 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4096
10ef9ab4 4097 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4098
4099 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4100}
4101
4102static void be_unmap_pci_bars(struct be_adapter *adapter)
4103{
c5b3ad4c
SP
4104 if (adapter->csr)
4105 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4106 if (adapter->db)
ce66f781 4107 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4108}
4109
ce66f781
SP
4110static int db_bar(struct be_adapter *adapter)
4111{
4112 if (lancer_chip(adapter) || !be_physfn(adapter))
4113 return 0;
4114 else
4115 return 4;
4116}
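
/* BAR layout implied above: Lancer and VFs expose the doorbell region on
 * BAR 0, other PFs on BAR 4; BEx PFs additionally map the CSR region
 * from BAR 2 (see be_map_pci_bars() below).
 */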
4117
4118static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4119{
dbf0f2a7 4120 if (skyhawk_chip(adapter)) {
ce66f781
SP
4121 adapter->roce_db.size = 4096;
4122 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4123 db_bar(adapter));
4124 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4125 db_bar(adapter));
4126 }
045508a8 4127 return 0;
6b7c5b94
SP
4128}
4129
4130static int be_map_pci_bars(struct be_adapter *adapter)
4131{
4132 u8 __iomem *addr;
fe6d2a38 4133
c5b3ad4c
SP
4134 if (BEx_chip(adapter) && be_physfn(adapter)) {
4135 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4136 if (adapter->csr == NULL)
4137 return -ENOMEM;
4138 }
4139
ce66f781 4140 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4141 if (addr == NULL)
4142 goto pci_map_err;
ba343c77 4143 adapter->db = addr;
ce66f781
SP
4144
4145 be_roce_map_pci_bars(adapter);
6b7c5b94 4146 return 0;
ce66f781 4147
6b7c5b94
SP
4148pci_map_err:
4149 be_unmap_pci_bars(adapter);
4150 return -ENOMEM;
4151}
4152
6b7c5b94
SP
4153static void be_ctrl_cleanup(struct be_adapter *adapter)
4154{
8788fdc2 4155 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4156
4157 be_unmap_pci_bars(adapter);
4158
4159 if (mem->va)
2b7bcebf
IV
4160 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4161 mem->dma);
e7b909a6 4162
5b8821b7 4163 mem = &adapter->rx_filter;
e7b909a6 4164 if (mem->va)
2b7bcebf
IV
4165 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4166 mem->dma);
6b7c5b94
SP
4167}
4168
6b7c5b94
SP
4169static int be_ctrl_init(struct be_adapter *adapter)
4170{
8788fdc2
SP
4171 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4172 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4173 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4174 u32 sli_intf;
6b7c5b94 4175 int status;
6b7c5b94 4176
ce66f781
SP
4177 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4178 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4179 SLI_INTF_FAMILY_SHIFT;
4180 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4181
6b7c5b94
SP
4182 status = be_map_pci_bars(adapter);
4183 if (status)
e7b909a6 4184 goto done;
6b7c5b94
SP
4185
4186 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4187 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4188 mbox_mem_alloc->size,
4189 &mbox_mem_alloc->dma,
4190 GFP_KERNEL);
6b7c5b94 4191 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4192 status = -ENOMEM;
4193 goto unmap_pci_bars;
6b7c5b94
SP
4194 }
4195 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4196 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4197 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4198 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
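	/* The mailbox must be 16-byte aligned: 16 extra bytes are allocated
	 * and both the CPU and bus addresses are rounded up with PTR_ALIGN,
	 * e.g. a va ending in 0x...09 becomes 0x...10.
	 */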
e7b909a6 4199
5b8821b7 4200 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4201 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4202 rx_filter->size, &rx_filter->dma,
4203 GFP_KERNEL);
5b8821b7 4204 if (rx_filter->va == NULL) {
e7b909a6
SP
4205 status = -ENOMEM;
4206 goto free_mbox;
4207 }
1f9061d2 4208
2984961c 4209 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4210 spin_lock_init(&adapter->mcc_lock);
4211 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4212
5eeff635 4213 init_completion(&adapter->et_cmd_compl);
cf588477 4214 pci_save_state(adapter->pdev);
6b7c5b94 4215 return 0;
e7b909a6
SP
4216
4217free_mbox:
2b7bcebf
IV
4218 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4219 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4220
4221unmap_pci_bars:
4222 be_unmap_pci_bars(adapter);
4223
4224done:
4225 return status;
6b7c5b94
SP
4226}
4227
4228static void be_stats_cleanup(struct be_adapter *adapter)
4229{
3abcdeda 4230 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4231
4232 if (cmd->va)
2b7bcebf
IV
4233 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4234 cmd->va, cmd->dma);
6b7c5b94
SP
4235}
4236
4237static int be_stats_init(struct be_adapter *adapter)
4238{
3abcdeda 4239 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4240
ca34fe38
SP
4241 if (lancer_chip(adapter))
4242 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4243 else if (BE2_chip(adapter))
89a88ab8 4244 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4245 else if (BE3_chip(adapter))
ca34fe38 4246 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4247 else
4248 /* ALL non-BE ASICs */
4249 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4250
ede23fa8
JP
4251 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4252 GFP_KERNEL);
6b7c5b94
SP
4253 if (cmd->va == NULL)
4254 return -1;
4255 return 0;
4256}
4257
3bc6b06c 4258static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4259{
4260 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4261
6b7c5b94
SP
4262 if (!adapter)
4263 return;
4264
045508a8 4265 be_roce_dev_remove(adapter);
8cef7a78 4266 be_intr_set(adapter, false);
045508a8 4267
f67ef7ba
PR
4268 cancel_delayed_work_sync(&adapter->func_recovery_work);
4269
6b7c5b94
SP
4270 unregister_netdev(adapter->netdev);
4271
5fb379ee
SP
4272 be_clear(adapter);
4273
bf99e50d
PR
4274 /* tell fw we're done with firing cmds */
4275 be_cmd_fw_clean(adapter);
4276
6b7c5b94
SP
4277 be_stats_cleanup(adapter);
4278
4279 be_ctrl_cleanup(adapter);
4280
d6b6d987
SP
4281 pci_disable_pcie_error_reporting(pdev);
4282
6b7c5b94
SP
4283 pci_release_regions(pdev);
4284 pci_disable_device(pdev);
4285
4286 free_netdev(adapter->netdev);
4287}
4288
39f1d94d 4289static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4290{
baaa08d1 4291 int status, level;
6b7c5b94 4292
9e1453c5
AK
4293 status = be_cmd_get_cntl_attributes(adapter);
4294 if (status)
4295 return status;
4296
7aeb2156
PR
4297 /* Must be a power of 2 or else MODULO will BUG_ON */
4298 adapter->be_get_temp_freq = 64;
4299
baaa08d1
VV
4300 if (BEx_chip(adapter)) {
4301 level = be_cmd_get_fw_log_level(adapter);
4302 adapter->msg_enable =
4303 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4304 }
941a77d5 4305
92bf14ab 4306 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4307 return 0;
6b7c5b94
SP
4308}
4309
f67ef7ba 4310static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4311{
01e5b2c4 4312 struct device *dev = &adapter->pdev->dev;
d8110f62 4313 int status;
d8110f62 4314
f67ef7ba
PR
4315 status = lancer_test_and_set_rdy_state(adapter);
4316 if (status)
4317 goto err;
d8110f62 4318
f67ef7ba
PR
4319 if (netif_running(adapter->netdev))
4320 be_close(adapter->netdev);
d8110f62 4321
f67ef7ba
PR
4322 be_clear(adapter);
4323
01e5b2c4 4324 be_clear_all_error(adapter);
f67ef7ba
PR
4325
4326 status = be_setup(adapter);
4327 if (status)
4328 goto err;
d8110f62 4329
f67ef7ba
PR
4330 if (netif_running(adapter->netdev)) {
4331 status = be_open(adapter->netdev);
d8110f62
PR
4332 if (status)
4333 goto err;
f67ef7ba 4334 }
d8110f62 4335
4bebb56a 4336 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4337 return 0;
4338err:
01e5b2c4
SK
4339 if (status == -EAGAIN)
4340 dev_err(dev, "Waiting for resource provisioning\n");
4341 else
4bebb56a 4342 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4343
f67ef7ba
PR
4344 return status;
4345}
4346
4347static void be_func_recovery_task(struct work_struct *work)
4348{
4349 struct be_adapter *adapter =
4350 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4351 int status = 0;
d8110f62 4352
f67ef7ba 4353 be_detect_error(adapter);
d8110f62 4354
f67ef7ba 4355 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4356
f67ef7ba
PR
4357 rtnl_lock();
4358 netif_device_detach(adapter->netdev);
4359 rtnl_unlock();
d8110f62 4360
f67ef7ba 4361 status = lancer_recover_func(adapter);
f67ef7ba
PR
4362 if (!status)
4363 netif_device_attach(adapter->netdev);
d8110f62 4364 }
f67ef7ba 4365
01e5b2c4
SK
4366 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4367 * no need to attempt further recovery.
4368 */
4369 if (!status || status == -EAGAIN)
4370 schedule_delayed_work(&adapter->func_recovery_work,
4371 msecs_to_jiffies(1000));
d8110f62
PR
4372}
4373
4374static void be_worker(struct work_struct *work)
4375{
4376 struct be_adapter *adapter =
4377 container_of(work, struct be_adapter, work.work);
4378 struct be_rx_obj *rxo;
4379 int i;
4380
d8110f62
PR
4381 	/* When interrupts are not yet enabled, just reap any pending
4382 	 * MCC completions */
4383 if (!netif_running(adapter->netdev)) {
072a9c48 4384 local_bh_disable();
10ef9ab4 4385 be_process_mcc(adapter);
072a9c48 4386 local_bh_enable();
d8110f62
PR
4387 goto reschedule;
4388 }
4389
4390 if (!adapter->stats_cmd_sent) {
4391 if (lancer_chip(adapter))
4392 lancer_cmd_get_pport_stats(adapter,
4393 &adapter->stats_cmd);
4394 else
4395 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4396 }
4397
d696b5e2
VV
4398 if (be_physfn(adapter) &&
4399 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4400 be_cmd_get_die_temperature(adapter);
4401
d8110f62 4402 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4403 /* Replenish RX-queues starved due to memory
4404 * allocation failures.
4405 */
4406 if (rxo->rx_post_starved)
d8110f62 4407 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4408 }
4409
2632bafd 4410 be_eqd_update(adapter);
10ef9ab4 4411
d8110f62
PR
4412reschedule:
4413 adapter->work_counter++;
4414 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4415}
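
/* Timing note: be_worker() re-arms itself every 1000 ms, so with
 * be_get_temp_freq == 64 the die-temperature query above runs roughly
 * once every 64 seconds, and only on the PF.
 */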
4416
257a3feb 4417 /* If any VFs are already enabled, don't FLR the PF */
39f1d94d
SP
4418static bool be_reset_required(struct be_adapter *adapter)
4419{
257a3feb 4420 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4421}
4422
d379142b
SP
4423static char *mc_name(struct be_adapter *adapter)
4424{
4425 if (adapter->function_mode & FLEX10_MODE)
4426 return "FLEX10";
4427 else if (adapter->function_mode & VNIC_MODE)
4428 return "vNIC";
4429 else if (adapter->function_mode & UMC_ENABLED)
4430 return "UMC";
4431 else
4432 return "";
4433}
4434
4435static inline char *func_name(struct be_adapter *adapter)
4436{
4437 return be_physfn(adapter) ? "PF" : "VF";
4438}
4439
1dd06ae8 4440static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4441{
4442 int status = 0;
4443 struct be_adapter *adapter;
4444 struct net_device *netdev;
b4e32a71 4445 char port_name;
6b7c5b94
SP
4446
4447 status = pci_enable_device(pdev);
4448 if (status)
4449 goto do_none;
4450
4451 status = pci_request_regions(pdev, DRV_NAME);
4452 if (status)
4453 goto disable_dev;
4454 pci_set_master(pdev);
4455
7f640062 4456 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4457 if (netdev == NULL) {
4458 status = -ENOMEM;
4459 goto rel_reg;
4460 }
4461 adapter = netdev_priv(netdev);
4462 adapter->pdev = pdev;
4463 pci_set_drvdata(pdev, adapter);
4464 adapter->netdev = netdev;
2243e2e9 4465 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4466
4c15c243 4467 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4468 if (!status) {
4469 netdev->features |= NETIF_F_HIGHDMA;
4470 } else {
4c15c243 4471 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4472 if (status) {
4473 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4474 goto free_netdev;
4475 }
4476 }
4477
ea58c180
AK
4478 if (be_physfn(adapter)) {
4479 status = pci_enable_pcie_error_reporting(pdev);
4480 if (!status)
4481 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4482 }
d6b6d987 4483
6b7c5b94
SP
4484 status = be_ctrl_init(adapter);
4485 if (status)
39f1d94d 4486 goto free_netdev;
6b7c5b94 4487
2243e2e9 4488 /* sync up with fw's ready state */
ba343c77 4489 if (be_physfn(adapter)) {
bf99e50d 4490 status = be_fw_wait_ready(adapter);
ba343c77
SB
4491 if (status)
4492 goto ctrl_clean;
ba343c77 4493 }
6b7c5b94 4494
39f1d94d
SP
4495 if (be_reset_required(adapter)) {
4496 status = be_cmd_reset_function(adapter);
4497 if (status)
4498 goto ctrl_clean;
556ae191 4499
2d177be8
KA
4500 /* Wait for interrupts to quiesce after an FLR */
4501 msleep(100);
4502 }
8cef7a78
SK
4503
4504 /* Allow interrupts for other ULPs running on NIC function */
4505 be_intr_set(adapter, true);
10ef9ab4 4506
2d177be8
KA
4507 /* tell fw we're ready to fire cmds */
4508 status = be_cmd_fw_init(adapter);
4509 if (status)
4510 goto ctrl_clean;
4511
2243e2e9
SP
4512 status = be_stats_init(adapter);
4513 if (status)
4514 goto ctrl_clean;
4515
39f1d94d 4516 status = be_get_initial_config(adapter);
6b7c5b94
SP
4517 if (status)
4518 goto stats_clean;
6b7c5b94
SP
4519
4520 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4521 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4522 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4523
5fb379ee
SP
4524 status = be_setup(adapter);
4525 if (status)
55f5c3c5 4526 goto stats_clean;
2243e2e9 4527
3abcdeda 4528 be_netdev_init(netdev);
6b7c5b94
SP
4529 status = register_netdev(netdev);
4530 if (status != 0)
5fb379ee 4531 goto unsetup;
6b7c5b94 4532
045508a8
PP
4533 be_roce_dev_add(adapter);
4534
f67ef7ba
PR
4535 schedule_delayed_work(&adapter->func_recovery_work,
4536 msecs_to_jiffies(1000));
b4e32a71
PR
4537
4538 be_cmd_query_port_name(adapter, &port_name);
4539
d379142b
SP
4540 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4541 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4542
6b7c5b94
SP
4543 return 0;
4544
5fb379ee
SP
4545unsetup:
4546 be_clear(adapter);
6b7c5b94
SP
4547stats_clean:
4548 be_stats_cleanup(adapter);
4549ctrl_clean:
4550 be_ctrl_cleanup(adapter);
f9449ab7 4551free_netdev:
fe6d2a38 4552 free_netdev(netdev);
6b7c5b94
SP
4553rel_reg:
4554 pci_release_regions(pdev);
4555disable_dev:
4556 pci_disable_device(pdev);
4557do_none:
c4ca2374 4558 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4559 return status;
4560}
4561
4562static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4563{
4564 struct be_adapter *adapter = pci_get_drvdata(pdev);
4565 struct net_device *netdev = adapter->netdev;
4566
76a9e08e 4567 if (adapter->wol_en)
71d8d1b5
AK
4568 be_setup_wol(adapter, true);
4569
d4360d6f 4570 be_intr_set(adapter, false);
f67ef7ba
PR
4571 cancel_delayed_work_sync(&adapter->func_recovery_work);
4572
6b7c5b94
SP
4573 netif_device_detach(netdev);
4574 if (netif_running(netdev)) {
4575 rtnl_lock();
4576 be_close(netdev);
4577 rtnl_unlock();
4578 }
9b0365f1 4579 be_clear(adapter);
6b7c5b94
SP
4580
4581 pci_save_state(pdev);
4582 pci_disable_device(pdev);
4583 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4584 return 0;
4585}
4586
4587static int be_resume(struct pci_dev *pdev)
4588{
4589 int status = 0;
4590 struct be_adapter *adapter = pci_get_drvdata(pdev);
4591 struct net_device *netdev = adapter->netdev;
4592
4593 netif_device_detach(netdev);
4594
4595 status = pci_enable_device(pdev);
4596 if (status)
4597 return status;
4598
1ca01512 4599 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4600 pci_restore_state(pdev);
4601
dd5746bf
SB
4602 status = be_fw_wait_ready(adapter);
4603 if (status)
4604 return status;
4605
d4360d6f 4606 be_intr_set(adapter, true);
2243e2e9
SP
4607 /* tell fw we're ready to fire cmds */
4608 status = be_cmd_fw_init(adapter);
4609 if (status)
4610 return status;
4611
9b0365f1 4612 be_setup(adapter);
6b7c5b94
SP
4613 if (netif_running(netdev)) {
4614 rtnl_lock();
4615 be_open(netdev);
4616 rtnl_unlock();
4617 }
f67ef7ba
PR
4618
4619 schedule_delayed_work(&adapter->func_recovery_work,
4620 msecs_to_jiffies(1000));
6b7c5b94 4621 netif_device_attach(netdev);
71d8d1b5 4622
76a9e08e 4623 if (adapter->wol_en)
71d8d1b5 4624 be_setup_wol(adapter, false);
a4ca055f 4625
6b7c5b94
SP
4626 return 0;
4627}
4628
82456b03
SP
4629/*
4630 * An FLR will stop BE from DMAing any data.
4631 */
4632static void be_shutdown(struct pci_dev *pdev)
4633{
4634 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4635
2d5d4154
AK
4636 if (!adapter)
4637 return;
82456b03 4638
0f4a6828 4639 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4640 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4641
2d5d4154 4642 netif_device_detach(adapter->netdev);
82456b03 4643
57841869
AK
4644 be_cmd_reset_function(adapter);
4645
82456b03 4646 pci_disable_device(pdev);
82456b03
SP
4647}
4648
cf588477
SP
4649static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4650 pci_channel_state_t state)
4651{
4652 struct be_adapter *adapter = pci_get_drvdata(pdev);
4653 struct net_device *netdev = adapter->netdev;
4654
4655 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4656
01e5b2c4
SK
4657 if (!adapter->eeh_error) {
4658 adapter->eeh_error = true;
cf588477 4659
01e5b2c4 4660 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4661
cf588477 4662 rtnl_lock();
01e5b2c4
SK
4663 netif_device_detach(netdev);
4664 if (netif_running(netdev))
4665 be_close(netdev);
cf588477 4666 rtnl_unlock();
01e5b2c4
SK
4667
4668 be_clear(adapter);
cf588477 4669 }
cf588477
SP
4670
4671 if (state == pci_channel_io_perm_failure)
4672 return PCI_ERS_RESULT_DISCONNECT;
4673
4674 pci_disable_device(pdev);
4675
eeb7fc7b
SK
4676 /* The error could cause the FW to trigger a flash debug dump.
4677 * Resetting the card while flash dump is in progress
c8a54163
PR
4678 * can cause it not to recover; wait for it to finish.
4679 	 * Wait only for the first function, as the wait is needed only
4680 	 * once per adapter.
eeb7fc7b 4681 */
c8a54163
PR
4682 if (pdev->devfn == 0)
4683 ssleep(30);
4684
cf588477
SP
4685 return PCI_ERS_RESULT_NEED_RESET;
4686}
4687
4688static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4689{
4690 struct be_adapter *adapter = pci_get_drvdata(pdev);
4691 int status;
4692
4693 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4694
4695 status = pci_enable_device(pdev);
4696 if (status)
4697 return PCI_ERS_RESULT_DISCONNECT;
4698
4699 pci_set_master(pdev);
1ca01512 4700 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4701 pci_restore_state(pdev);
4702
4703 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4704 dev_info(&adapter->pdev->dev,
4705 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4706 status = be_fw_wait_ready(adapter);
cf588477
SP
4707 if (status)
4708 return PCI_ERS_RESULT_DISCONNECT;
4709
d6b6d987 4710 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4711 be_clear_all_error(adapter);
cf588477
SP
4712 return PCI_ERS_RESULT_RECOVERED;
4713}
4714
4715static void be_eeh_resume(struct pci_dev *pdev)
4716{
4717 int status = 0;
4718 struct be_adapter *adapter = pci_get_drvdata(pdev);
4719 struct net_device *netdev = adapter->netdev;
4720
4721 dev_info(&adapter->pdev->dev, "EEH resume\n");
4722
4723 pci_save_state(pdev);
4724
2d177be8 4725 status = be_cmd_reset_function(adapter);
cf588477
SP
4726 if (status)
4727 goto err;
4728
2d177be8
KA
4729 /* tell fw we're ready to fire cmds */
4730 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4731 if (status)
4732 goto err;
4733
cf588477
SP
4734 status = be_setup(adapter);
4735 if (status)
4736 goto err;
4737
4738 if (netif_running(netdev)) {
4739 status = be_open(netdev);
4740 if (status)
4741 goto err;
4742 }
f67ef7ba
PR
4743
4744 schedule_delayed_work(&adapter->func_recovery_work,
4745 msecs_to_jiffies(1000));
cf588477
SP
4746 netif_device_attach(netdev);
4747 return;
4748err:
4749 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4750}
4751
3646f0e5 4752static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4753 .error_detected = be_eeh_err_detected,
4754 .slot_reset = be_eeh_reset,
4755 .resume = be_eeh_resume,
4756};
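
/* EEH flow implemented above: error_detected() detaches the netdev and
 * tears the adapter down (waiting up to 30s on function 0 for any FW
 * flash dump to finish), slot_reset() re-enables the device and waits
 * for FW readiness, and resume() resets the function, re-runs
 * be_setup()/be_open(), and re-attaches the netdev.
 */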
4757
6b7c5b94
SP
4758static struct pci_driver be_driver = {
4759 .name = DRV_NAME,
4760 .id_table = be_dev_ids,
4761 .probe = be_probe,
4762 .remove = be_remove,
4763 .suspend = be_suspend,
cf588477 4764 .resume = be_resume,
82456b03 4765 .shutdown = be_shutdown,
cf588477 4766 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4767};
4768
4769static int __init be_init_module(void)
4770{
8e95a202
JP
4771 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4772 rx_frag_size != 2048) {
6b7c5b94
SP
4773 printk(KERN_WARNING DRV_NAME
4774 " : Module param rx_frag_size must be 2048/4096/8192."
4775 " Using 2048\n");
4776 rx_frag_size = 2048;
4777 }
6b7c5b94
SP
4778
4779 return pci_register_driver(&be_driver);
4780}
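
/* Usage note (assuming the mainline module name be2net): rx_frag_size is
 * validated at load time, e.g.
 *
 *	modprobe be2net rx_frag_size=4096
 *
 * and any value other than 2048/4096/8192 falls back to 2048 after the
 * warning above.
 */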
4781module_init(be_init_module);
4782
4783static void __exit be_exit_module(void)
4784{
4785 pci_unregister_driver(&be_driver);
4786}
4787module_exit(be_exit_module);