be2net: clear promiscuous bits in adapter->flags while disabling promiscuous mode
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
c7bb15a6 2 * Copyright (C) 2005 - 2013 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
6b7c5b94
SP
26
/* Module identification exposed via modinfo */
MODULE_VERSION(DRV_VER);
/* NOTE(review): MODULE_DEVICE_TABLE also appears after be_dev_ids below —
 * apparent duplicate declaration; confirm which one is intended to stay.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* Number of PCI virtual functions to create at probe time (read-only) */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the NIC (read-only) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
/* PCI IDs of the BladeEngine / OneConnect devices this driver binds to;
 * the PCI core uses this table for driver/device matching.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error status-low
 * register, indexed by bit position; used when logging UE conditions.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position names for the Unrecoverable Error status-high register;
 * trailing entries are reserved/unknown bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 123
752961a1 124
6b7c5b94
SP
125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 128 if (mem->va) {
2b7bcebf
IV
129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
1cfafab9
SP
131 mem->va = NULL;
132 }
6b7c5b94
SP
133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
ede23fa8
JP
144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
6b7c5b94 146 if (!mem->va)
10ef9ab4 147 return -ENOMEM;
6b7c5b94
SP
148 return 0;
149}
150
68c45a2d 151static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 152{
db3ea781 153 u32 reg, enabled;
5f0b849e 154
db3ea781
SP
155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
5f0b849e 159 if (!enabled && enable)
6b7c5b94 160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 161 else if (enabled && !enable)
6b7c5b94 162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 163 else
6b7c5b94 164 return;
5f0b849e 165
db3ea781
SP
166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
168}
169
68c45a2d
SK
170static void be_intr_set(struct be_adapter *adapter, bool enable)
171{
172 int status = 0;
173
174 /* On lancer interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184}
185
8788fdc2 186static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
187{
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
191
192 wmb();
8788fdc2 193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
194}
195
94d73aaa
VV
196static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
6b7c5b94
SP
198{
199 u32 val = 0;
94d73aaa 200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
202
203 wmb();
94d73aaa 204 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
205}
206
8788fdc2 207static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
208 bool arm, bool clear_int, u16 num_popped)
209{
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 214
f67ef7ba 215 if (adapter->eeh_error)
cf588477
SP
216 return;
217
6b7c5b94
SP
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
225}
226
8788fdc2 227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 233
f67ef7ba 234 if (adapter->eeh_error)
cf588477
SP
235 return;
236
6b7c5b94
SP
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
241}
242
6b7c5b94
SP
/* ndo_set_mac_address handler.  Programs the new MAC via FW PMAC_ADD,
 * deletes the old PMAC entry, then queries the FW for the active MAC to
 * decide whether the change actually took effect (a VF without FILTMGMT
 * privilege may be silently refused).  Returns 0 on success or a
 * negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
/* Copy the v0 (BE2) FW statistics response into the driver's
 * chip-independent be_drv_stats structure.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats in little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per-port in the rxf section on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) FW statistics response into the driver's
 * chip-independent be_drv_stats structure.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats in little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
61000861
AK
/* Copy the v2 (Skyhawk and later) FW statistics response into the
 * driver's chip-independent be_drv_stats structure.  Also pulls in
 * RoCE counters when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats in little-endian; convert in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
005d5696
SX
/* Copy the Lancer per-port (pport) FW statistics into the driver's
 * chip-independent be_drv_stats structure.  Only the low 32 bits of
 * the 64-bit pport counters are used here (the *_lo fields).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns the stats in little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 527
09c1c68f
SP
528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
4188e7df 540static void populate_erx_stats(struct be_adapter *adapter,
a6c578ef
AK
541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543{
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* below erx HW counter can actually wrap around after
548 * 65535. Driver accumulates a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552}
553
89a88ab8
AK
554void be_parse_stats(struct be_adapter *adapter)
555{
61000861 556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
557 struct be_rx_obj *rxo;
558 int i;
a6c578ef 559 u32 erx_stat;
ac124ff9 560
ca34fe38
SP
561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
005d5696 563 } else {
ca34fe38
SP
564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
61000861
AK
566 else if (BE3_chip(adapter))
567 /* for BE3 */
ca34fe38 568 populate_be_v1_stats(adapter);
61000861
AK
569 else
570 populate_be_v2_stats(adapter);
d51ebd33 571
61000861 572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 573 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 576 }
09c1c68f 577 }
89a88ab8
AK
578}
579
ab1594e9
SP
/* ndo_get_stats64 handler: aggregate per-queue SW packet/byte counters
 * and the FW-derived error counters from drv_stats into 'stats'.
 * Returns 'stats' as required by the ndo contract.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: guarantees a consistent 64-bit
		 * snapshot on 32-bit hosts
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
b236916a 646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 647{
6b7c5b94
SP
648 struct net_device *netdev = adapter->netdev;
649
b236916a 650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 651 netif_carrier_off(netdev);
b236916a 652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 653 }
b236916a
AK
654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
6b7c5b94
SP
659}
660
3c8def97 661static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 663{
3c8def97
SP
664 struct be_tx_stats *stats = tx_stats(txo);
665
ab1594e9 666 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 671 if (stopped)
ac124ff9 672 stats->tx_stops++;
ab1594e9 673 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
6b7c5b94 679{
ebc8d2ab
DM
680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
6b7c5b94
SP
684 /* to account for hdr wrb */
685 cnt++;
fe6d2a38
SP
686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
6b7c5b94
SP
689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
fe6d2a38 692 }
6b7c5b94
SP
693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 702 wrb->rsvd0 = 0;
6b7c5b94
SP
703}
704
1ded132d
AK
705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
/* Program the per-packet header WRB: CRC, LSO/checksum offload flags,
 * optional HW VLAN insertion, event/completion bits and the total WRB
 * count and payload length for this request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 flag for IPv6 GSO; not used on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
755
2b7bcebf 756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 764 if (wrb->frag_len) {
7101e111 765 if (unmap_single)
2b7bcebf
IV
766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
7101e111 768 else
2b7bcebf 769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
770 }
771}
6b7c5b94 772
/* DMA-map the skb (linear part + page frags) and fill TX WRBs on txq,
 * finishing with the header WRB that describes the whole request.
 * Returns the number of payload bytes enqueued, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are undone and the
 * queue head is rewound.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rewind point on mapping failure */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length pad WRB to make the count even (non-Lancer) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the queue head and unmap each WRB filled so far */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
839
/* Insert VLAN tag(s) into the packet data itself (software tagging) to
 * work around pvid/QnQ firmware issues, optionally asking the caller
 * (via *skip_hw_vlan) to disable HW VLAN insertion for this packet.
 * May return NULL if skb_share_check()/__vlan_put_tag() fails.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* the tag now lives in the packet data, not skb metadata */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
882
bc0c3405
AK
883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
ee9c799c
SP
910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
bc0c3405 912{
ee9c799c 913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
914}
915
/* Apply chip-specific Tx errata workarounds to @skb before WRB creation.
 * Returns the (possibly reallocated) skb to transmit, or NULL when the pkt
 * was consumed/dropped; *skip_hw_vlan tells the WRB builder to suppress HW
 * VLAN insertion.  The checks below are order-sensitive: padding/trimming
 * must happen before the VLAN-inlining decisions that inspect skb->len.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		/* trim the skb back to the IP datagram's real length */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
992
/* ndo_start_xmit handler: apply workarounds, build WRBs for @skb on the
 * mapped Tx queue and ring the doorbell.  Always returns NETDEV_TX_OK;
 * undeliverable pkts are freed and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* remembered so we can roll back on failure */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* WRB creation failed (e.g. DMA mapping error): rewind the
		 * queue head and drop the pkt
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1041
1042static int be_change_mtu(struct net_device *netdev, int new_mtu)
1043{
1044 struct be_adapter *adapter = netdev_priv(netdev);
1045 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
1046 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1047 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
1048 dev_info(&adapter->pdev->dev,
1049 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
1050 BE_MIN_MTU,
1051 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1052 return -EINVAL;
1053 }
1054 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1055 netdev->mtu, new_mtu);
1056 netdev->mtu = new_mtu;
1057 return 0;
1058}
1059
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* Filter programming succeeded: if we were in VLAN promisc
		 * as a fallback, try to turn it back off now.
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in VLAN promisc: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1119
80d5c368 1120static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1123 int status = 0;
6b7c5b94 1124
a85e9986
PR
1125 /* Packets with VID 0 are always received by Lancer by default */
1126 if (lancer_chip(adapter) && vid == 0)
1127 goto ret;
1128
6b7c5b94 1129 adapter->vlan_tag[vid] = 1;
a6b74e01 1130 adapter->vlans_added++;
8e586137 1131
a6b74e01
SK
1132 status = be_vid_config(adapter);
1133 if (status) {
1134 adapter->vlans_added--;
80817cbf 1135 adapter->vlan_tag[vid] = 0;
a6b74e01 1136 }
80817cbf
AK
1137ret:
1138 return status;
6b7c5b94
SP
1139}
1140
80d5c368 1141static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1142{
1143 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1144 int status = 0;
6b7c5b94 1145
a85e9986
PR
1146 /* Packets with VID 0 are always received by Lancer by default */
1147 if (lancer_chip(adapter) && vid == 0)
1148 goto ret;
1149
6b7c5b94 1150 adapter->vlan_tag[vid] = 0;
a6b74e01 1151 status = be_vid_config(adapter);
80817cbf
AK
1152 if (!status)
1153 adapter->vlans_added--;
1154 else
1155 adapter->vlan_tag[vid] = 1;
1156ret:
1157 return status;
6b7c5b94
SP
1158}
1159
7ad09458
S
1160static void be_clear_promisc(struct be_adapter *adapter)
1161{
1162 adapter->promiscuous = false;
1163 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1164
1165 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1166}
1167
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast-MAC
 * filtering in HW to match netdev state.  Falls back to broader filters
 * (ALLMULTI or full promisc) when HW filter slots are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Drop all previously programmed secondary uc MACs ... */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		/* ... then re-add the current uc list from the netdev */
		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1227
/* ndo_set_vf_mac handler: program @mac for VF @vf.  On BEx chips the old
 * pmac entry must be deleted before the new one is added; newer chips use
 * a single set-MAC command.  Returns 0 or a negative errno / FW status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* cache the MAC only after HW accepted it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1259
64600ea5
AK
1260static int be_get_vf_config(struct net_device *netdev, int vf,
1261 struct ifla_vf_info *vi)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1265
11ac75ed 1266 if (!sriov_enabled(adapter))
64600ea5
AK
1267 return -EPERM;
1268
11ac75ed 1269 if (vf >= adapter->num_vfs)
64600ea5
AK
1270 return -EINVAL;
1271
1272 vi->vf = vf;
11ac75ed 1273 vi->tx_rate = vf_cfg->tx_rate;
a60b3a13
AK
1274 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1275 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1276 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1277
1278 return 0;
1279}
1280
/* ndo_set_vf_vlan handler: configure (or reset) transparent VLAN tagging
 * for VF @vf via the FW's hidden-switch config command.  @vlan==0 and
 * @qos==0 resets tagging.  Returns 0 or a negative errno / FW status.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* pack priority bits into the tag before comparing/storing */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1312
e1d18735
AK
1313static int be_set_vf_tx_rate(struct net_device *netdev,
1314 int vf, int rate)
1315{
1316 struct be_adapter *adapter = netdev_priv(netdev);
1317 int status = 0;
1318
11ac75ed 1319 if (!sriov_enabled(adapter))
e1d18735
AK
1320 return -EPERM;
1321
94f434c2 1322 if (vf >= adapter->num_vfs)
e1d18735
AK
1323 return -EINVAL;
1324
94f434c2
AK
1325 if (rate < 100 || rate > 10000) {
1326 dev_err(&adapter->pdev->dev,
1327 "tx rate must be between 100 and 10000 Mbps\n");
1328 return -EINVAL;
1329 }
e1d18735 1330
d5c18473
PR
1331 if (lancer_chip(adapter))
1332 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1333 else
1334 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1335
1336 if (status)
94f434c2 1337 dev_err(&adapter->pdev->dev,
e1d18735 1338 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1339 else
1340 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1341 return status;
1342}
1343
2632bafd
SP
1344static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1345 ulong now)
6b7c5b94 1346{
2632bafd
SP
1347 aic->rx_pkts_prev = rx_pkts;
1348 aic->tx_reqs_prev = tx_pkts;
1349 aic->jiffies = now;
1350}
ac124ff9 1351
/* Adaptive interrupt coalescing: derive a per-EQ delay from the observed
 * rx+tx packet rate since the last pass and push any changed delays to FW
 * in a single modify-EQD command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: force the static (ethtool) delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* seqcount retry loops give torn-free 64-bit counter reads */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* clamp to the per-EQ configured bounds */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* only queue a FW update when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1418
/* Account one Rx completion in the per-ring stats; writes are bracketed by
 * the u64_stats seqcount so 64-bit readers never see torn values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1434
2e588f84 1435static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1436{
19fad86f
PR
1437 /* L4 checksum is not reliable for non TCP/UDP packets.
1438 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1439 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1440 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1441}
1442
/* Pop the next posted Rx buffer fragment from the ring tail.  When this
 * fragment is the last user of its backing page, the page is DMA-unmapped
 * here; the caller owns the page reference afterwards.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1464
1465/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1466static void be_rx_compl_discard(struct be_rx_obj *rxo,
1467 struct be_rx_compl_info *rxcp)
6b7c5b94 1468{
6b7c5b94 1469 struct be_rx_page_info *page_info;
2e588f84 1470 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1471
e80d9da6 1472 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1473 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1474 put_page(page_info->page);
1475 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1476 }
1477}
1478
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the L2 header linearly; the payload stays in
		 * the page and is attached as frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1552
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb: drop the pkt but still reclaim its Rx buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1586
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* attach every HW fragment of the completion as a page frag */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken when the HW checksum was verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1642
/* Decode a v1 (BE3-native) Rx completion descriptor into rxcp.
 * NOTE(review): unlike the v0 parser, this one does not extract ip_frag;
 * rxcp->ip_frag keeps its prior value on this path — confirm whether the
 * v1 AMAP layout has an ip_frag field that should be read here.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* VLAN fields are only meaningful when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
1672
/* Decode a v0 (legacy) Rx completion descriptor into rxcp; also extracts
 * the ip_frag bit used to invalidate the L4 checksum for IP fragments.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* VLAN fields are only meaningful when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1704
/* Fetch and decode the next valid Rx completion from the CQ tail, or
 * return NULL when none is pending.  The decoded info lives in the
 * per-ring rxo->rxcp scratch struct (reused across calls).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the rest of the descriptor only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* drop the tag for the port's pvid when that vid was never
		 * explicitly configured by the user
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1747
1829b086 1748static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1749{
6b7c5b94 1750 u32 order = get_order(size);
1829b086 1751
6b7c5b94 1752 if (order > 0)
1829b086
ED
1753 gfp |= __GFP_COMP;
1754 return alloc_pages(gfp, order);
6b7c5b94
SP
1755}
1756
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Fill RXQ slots until MAX_RX_POST are posted or a slot still
	 * holding a page (i.e. a not-yet-consumed buffer) is reached */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page; map it for DMA once and
			 * carve rx_frag_size fragments out of it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			/* Next fragment of the same big page: take an extra
			 * page reference per fragment handed to HW */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the fragment's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: mark the last posted fragment as the last
	 * user of the big page so its DMA mapping gets torn down */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW about the newly posted buffers */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1827
/* Pop the next valid TX completion entry from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place and its valid bit cleared so it is
 * consumed exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1843
3c8def97
SP
/* Unmap and free the skb whose wrbs occupy the TXQ from the current tail
 * up to and including @last_index, advancing the tail past them.
 * Returns the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb may share the header's DMA mapping;
		 * unmap the header portion only once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1875
10ef9ab4
SP
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Walk the EQ until a zero (invalid) entry is found, clearing each
	 * consumed entry so it is counted only once */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Don't act on the entry before the evt word is seen set */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1895
10ef9ab4
SP
1896/* Leaves the EQ is disarmed state */
1897static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1898{
10ef9ab4 1899 int num = events_get(eqo);
859b1e4e 1900
10ef9ab4 1901 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1902}
1903
/* Drain rxo's completion queue and free all posted-but-unused RX buffers,
 * leaving the CQ unarmed and the RXQ empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is already in
			 * an error state */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1952
/* Drain all TX completion queues, then forcibly free any posted skbs for
 * which completions never arrived. Called during teardown after TX has
 * been disabled.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Credit the consumed entries back without
				 * re-arming the CQ */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of this skb to know how far
			 * to advance */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2011
10ef9ab4
SP
/* Destroy all event queues: drain pending events, destroy the HW queue,
 * unhook NAPI and release queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2027
/* Allocate and create the event queues (one per interrupt vector, capped
 * by the configured queue count), registering a NAPI context for each.
 * Returns 0 on success or a negative error; partially created queues are
 * left for be_evt_queues_destroy() to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2061
5fb379ee
SP
/* Destroy the MCC queue and its completion queue (MCCQ first, then CQ)
 * and free their memory.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2076
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it,
 * unwinding on failure via the goto chain. Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2109
6b7c5b94
SP
/* Destroy every TX queue and its completion queue, freeing their memory */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2128
/* Create the TX queues and their completion queues; the TXQ count is
 * bounded by the number of EQs and the HW maximum. Returns 0 or the first
 * error; partially created queues are cleaned up by be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2169
/* Destroy all RX completion queues and free their memory */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2183
/* Size the RX queue set and create one completion queue per RX queue.
 * Returns 0 or the first error; partial creations are cleaned up by
 * be_rx_cqs_destroy().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs share EQs round-robin when there are fewer EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2220
6b7c5b94
SP
/* INTx interrupt handler; @dev is the first (and only) EQ object used in
 * INTx mode. Counts events, disarms the EQ and schedules NAPI.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2252
/* MSI-X interrupt handler; @dev is the EQ object bound to this vector.
 * Disarms the EQ and defers all processing to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2261
2e588f84 2262static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2263{
e38b1706 2264 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2265}
2266
/* Process up to @budget RX completions on @rxo, delivering packets to the
 * stack (via GRO unless busy-polling). Returns the number of completions
 * consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Re-arm the CQ and credit the consumed entries */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2322
10ef9ab4
SP
/* Process up to @budget TX completions on @txo (subqueue @idx), freeing
 * sent skbs and waking the subqueue if it was flow-stopped.
 * Returns true when the budget was not exhausted (TX work is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2355
/* NAPI poll handler: services the TXQs, RXQs and (for the MCC EQ) the MCC
 * completions bound to this EQ, then either completes NAPI and re-arms the
 * EQ or stays in polling mode. Returns the RX work done (or full budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	/* Count (and clear) EQ entries up front; they are credited back to
	 * HW in the be_eq_notify() below */
	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Unfinished TX work forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RX rings; come back later */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2400
6384a4d0
SP
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll handler: processes a small RX batch (budget 4) on the first
 * ring of this EQ that yields work. Returns the work done, or
 * LL_FLUSH_BUSY when NAPI currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2422
/* Probe the HW error registers: the SLIPORT status registers on Lancer,
 * the PCI-config UE status registers otherwise. Sets adapter->hw_error on
 * a SLIPORT error and logs the failing bits.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* An error was already detected; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask out UE bits the FW tells us to ignore */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2498
8d56ff11
SP
2499static void be_msix_disable(struct be_adapter *adapter)
2500{
ac6a0c4a 2501 if (msix_enabled(adapter)) {
8d56ff11 2502 pci_disable_msix(adapter->pdev);
ac6a0c4a 2503 adapter->num_msix_vec = 0;
68d7bdcb 2504 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2505 }
2506}
2507
/* Enable MSI-X, retrying once with however many vectors the kernel says
 * are available, and split the granted vectors between NIC and RoCE.
 * Returns 0 on success or (for PFs that can fall back to INTx) even on
 * failure; VFs propagate the error since they cannot use INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could be
		 * allocated; retry with exactly that many */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* Give RoCE half the vectors when enough were granted */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2556
/* Return the MSI-X vector number assigned to @eqo */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
6b7c5b94 2562
b628bde2
SP
/* Request one IRQ per event queue; on failure, free the IRQs already
 * requested and disable MSI-X. Returns 0 or the request_irq error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind the IRQs requested before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2586
/* Register interrupts: MSI-X when enabled, otherwise a shared INTx line
 * on the first EQ (PFs only). Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2614
/* Release the registered interrupts (INTx or one IRQ per EQ) and clear
 * adapter->isr_registered. Safe to call when nothing is registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2637
/* Destroy the RX queues: tell HW to destroy each created RXQ, drain its
 * CQ and free the posted buffers, then release queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* Drain completions and free unused RX buffers only
			 * after the HW queue is gone */
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2653
889cd4b2
SP
/* ndo_stop handler: quiesce NAPI, drain TX, tear down RX queues, delete
 * extra unicast MACs, clean the EQs and release the IRQs — in that order.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional unicast MACs programmed beyond the
	 * primary one (index 0) */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running before cleaning */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2697
/* Allocate and create the RX queues (default RXQ first, then the RSS
 * rings), program the RSS indirection table/flags and post the initial RX
 * buffers. Returns 0 or the first error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-entry indirection table by cycling through
		 * the RSS ring ids */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on post-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		adapter->rss_flags = RSS_ENABLE_NONE;
	}

	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
			       128);
	if (rc) {
		adapter->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2757
6b7c5b94
SP
/* ndo_open handler: creates the RX rings, registers IRQs, arms the
 * completion/event queues, enables NAPI and busy-poll, queries the link
 * state and starts the TX queues. On any failure the partially opened
 * device is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues before enabling events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report the current link state if the query works */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2801
71d8d1b5
AK
2802static int be_setup_wol(struct be_adapter *adapter, bool enable)
2803{
2804 struct be_dma_mem cmd;
2805 int status = 0;
2806 u8 mac[ETH_ALEN];
2807
2808 memset(mac, 0, ETH_ALEN);
2809
2810 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2811 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2812 GFP_KERNEL);
71d8d1b5
AK
2813 if (cmd.va == NULL)
2814 return -1;
71d8d1b5
AK
2815
2816 if (enable) {
2817 status = pci_write_config_dword(adapter->pdev,
2818 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2819 if (status) {
2820 dev_err(&adapter->pdev->dev,
2381a55c 2821 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2822 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2823 cmd.dma);
71d8d1b5
AK
2824 return status;
2825 }
2826 status = be_cmd_enable_magic_wol(adapter,
2827 adapter->netdev->dev_addr, &cmd);
2828 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2829 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2830 } else {
2831 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2832 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2833 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2834 }
2835
2b7bcebf 2836 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2837 return status;
2838}
2839
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is reported but does not stop the loop;
		 * the last failing status is what gets returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets seed + 1; only the last octet is bumped,
		 * so it wraps after 256 VFs (bounded by be_max_vfs).
		 */
		mac[5] += 1;
	}
	return status;
}
2874
4c876616
SP
/* Reads back the currently-active MAC of each VF from the FW and caches
 * it in vf_cfg->mac_addr. Used when VFs already existed before this
 * probe (old_vfs path in be_vf_setup). Stops at the first FW error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2891
/* Tears down SR-IOV state: disables SR-IOV, deletes each VF's MAC and
 * interface, and frees the vf_cfg array. If any VF is still assigned to
 * a VM, the VFs are left enabled and only the local bookkeeping is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal mirrors how it was programmed: pmac entry on
		 * BEx, SET_MAC(NULL) on newer chips.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2919
7707133c
SP
/* Destroys all queue objects. Order is the reverse of creation in
 * be_setup_queues(): MCC, RX CQs, TX queues, then the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2927
/* Synchronously cancels the periodic worker, but only if it was actually
 * scheduled (tracked via BE_FLAGS_WORKER_SCHEDULED), and clears the flag.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
2935
/* Deletes the primary MAC (pmac_id[0]) and all programmed uc-macs from
 * the FW, then frees the pmac_id array. Safe to call when pmac_id was
 * never allocated.
 */
static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		/* uc_macs counts extra unicast MACs; +1 covers the primary */
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}
2950
/* Full teardown counterpart of be_setup(): stops the worker, clears VFs,
 * MACs, the interface, all queues and finally MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2968
/* Creates a FW interface for each VF. On non-BE3 chips the per-VF
 * capability flags are taken from the FW profile when available;
 * otherwise the baseline untagged/broadcast/multicast set is used.
 * Returns 0 or the first FW error encountered.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			/* On failure cap_flags keeps its previous value */
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2998
/* Allocates the per-VF config array and marks every if_handle/pmac_id
 * as invalid (-1) so later teardown can tell what was actually created.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3015
f9449ab7
SP
/* Enables SR-IOV for this PF. Handles both the fresh case (create VF
 * interfaces, assign MACs, enable SR-IOV) and the case where VFs were
 * already enabled by a previous driver instance (old_vfs: reuse the
 * existing interfaces and query their MACs instead).
 * On any failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Interfaces: reuse existing ones or create fresh ones per VF */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* MACs: query existing assignments or program new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Cache the link speed as the VF's initial tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3107
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits */
/* Derives the resource limits (queues, MACs, VLANs, VFs) for BE2/BE3
 * chips from chip type, function mode and SR-IOV intent, since these
 * chips cannot report them via GET_FUNC_CONFIG.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* VLAN table is shared in Flex10 mode, hence the divide-by-8 */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable, non-SRIOV physical function;
	 * otherwise max_rss_qs keeps its prior value (caller zero-inits).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3159
30128031
SP
/* Resets the software state that be_setup() relies on to sane defaults
 * before (re-)initializing the adapter.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;	/* unknown until FW reports it */
	adapter->if_handle = -1;	/* no FW interface created yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	/* PFs start with full command privileges, VFs with the minimum;
	 * the real set is queried from FW later in be_setup().
	 */
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
3172
/* Populates adapter->res: computed locally for BE2/BE3, queried from FW
 * (GET_FUNC_CONFIG / GET_PROFILE_CONFIG) for Lancer/Skyhawk-class chips.
 * Returns 0 or a FW error code.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only max_vfs is taken from the PF-pool profile limits */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3216
39f1d94d
SP
/* Routine to query per function resource limits */
/* Queries FW configuration (port number, function mode/caps, ASIC rev,
 * active profile), fills adapter->res, allocates the pmac_id table and
 * clamps cfg_num_qs to the discovered limits.
 * Returns 0, -ENOMEM, or a FW error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* Informational only; profile query failure is not fatal */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3252
95046b92
SP
/* Establishes the netdev MAC address: reads the permanent MAC from FW
 * when dev_addr is still zero (first setup), otherwise re-programs the
 * existing dev_addr (e.g. after a HW reset). Returns 0 or a FW error.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3276
68d7bdcb
SP
/* Arms the periodic (1s) worker and records that it is scheduled so
 * be_cancel_worker() knows whether a sync-cancel is needed.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3282
/* Creates all queue objects (EQs, TX queues, RX CQs, MCC queues) and
 * publishes the real RX/TX queue counts to the net stack. Callers hold
 * rtnl_lock for the netif_set_real_num_* updates. Partially created
 * queues are NOT freed here; callers unwind via be_clear()/
 * be_clear_queues(). Returns 0 or the first error.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3317
68d7bdcb
SP
/* Re-creates all queues with the current configuration: closes the
 * device if running, stops the worker, tears queues down (and MSI-X,
 * unless vectors are shared with RoCE), then rebuilds everything and
 * reopens the device. Exported to other driver files (non-static).
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3353
7707133c
SP
/* Main adapter initialization: queries FW config/resources, enables
 * MSI-X, creates the FW interface and all queues, programs the MAC,
 * restores VLAN/RX-mode/flow-control state, optionally sets up SR-IOV,
 * and starts the periodic worker. On failure be_clear() unwinds.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags this function is actually capable of */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* BE2 FW older than 4.0 has known IRQ problems; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters that were configured before this setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only touch FW flow-control if it differs from the desired state */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3434
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler (used by netpoll/netconsole): arms every
 * event queue for a single poll pass and kicks its NAPI context so
 * pending completions are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed (checkpatch style) */
}
#endif
3450
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Cookie identifying a flash section directory inside a UFI image */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

/* Decides whether the redboot (boot code) section needs flashing by
 * comparing the CRC stored in the last 4 bytes of the new image against
 * the CRC the FW reports for the currently flashed boot code.
 * Returns true only when the CRCs differ (i.e. an update is needed);
 * returns false on a FW query error as a safe default.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC occupies the final 4 bytes of the image section */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3480
306f1348
SP
3481static bool phy_flashing_required(struct be_adapter *adapter)
3482{
42f11cf2
AK
3483 return (adapter->phy.phy_type == TN_8022 &&
3484 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3485}
3486
c165541e
PR
3487static bool is_comp_in_ufi(struct be_adapter *adapter,
3488 struct flash_section_info *fsec, int type)
3489{
3490 int i = 0, img_type = 0;
3491 struct flash_section_info_g2 *fsec_g2 = NULL;
3492
ca34fe38 3493 if (BE2_chip(adapter))
c165541e
PR
3494 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3495
3496 for (i = 0; i < MAX_FLASH_COMP; i++) {
3497 if (fsec_g2)
3498 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3499 else
3500 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3501
3502 if (img_type == type)
3503 return true;
3504 }
3505 return false;
3506
3507}
3508
4188e7df 3509static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3510 int header_size,
3511 const struct firmware *fw)
3512{
3513 struct flash_section_info *fsec = NULL;
3514 const u8 *p = fw->data;
3515
3516 p += header_size;
3517 while (p < (fw->data + fw->size)) {
3518 fsec = (struct flash_section_info *)p;
3519 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3520 return fsec;
3521 p += 32;
3522 }
3523 return NULL;
3524}
3525
773a2d7c
PR
/* Writes one image section to flash in 32KB chunks through the FW
 * write-flashrom command. Intermediate chunks use the SAVE op; the
 * final chunk uses the FLASH op to commit (PHY variants for PHY FW).
 * An ILLEGAL_IOCTL_REQ for PHY FW is tolerated (older FW); any other
 * error is fatal. Returns 0 or the FW error code.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks accumulate
		 * (SAVE). PHY firmware has its own op codes.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3566
/* For BE2, BE3 and BE3-R */
/* Flashes a UFI image on BE2/BE3-class chips using fixed per-generation
 * tables of (offset, optype, max size, image type). Sections absent
 * from the UFI's directory are skipped; redboot is flashed only when
 * its CRC differs from what is already in flash. Returns 0, -1 for a
 * corrupted/oversized UFI, or the first FW error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* Pick the flash layout table for this chip generation */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing requires FW version >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
				continue;

		/* Redboot is skipped when its CRC already matches flash */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds check: section must lie within the UFI file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3676
773a2d7c
PR
3677static int be_flash_skyhawk(struct be_adapter *adapter,
3678 const struct firmware *fw,
3679 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3680{
773a2d7c
PR
3681 int status = 0, i, filehdr_size = 0;
3682 int img_offset, img_size, img_optype, redboot;
3683 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3684 const u8 *p = fw->data;
3685 struct flash_section_info *fsec = NULL;
3686
3687 filehdr_size = sizeof(struct flash_file_hdr_g3);
3688 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3689 if (!fsec) {
3690 dev_err(&adapter->pdev->dev,
3691 "Invalid Cookie. UFI corrupted ?\n");
3692 return -1;
3693 }
3694
3695 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3696 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3697 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3698
3699 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3700 case IMAGE_FIRMWARE_iSCSI:
3701 img_optype = OPTYPE_ISCSI_ACTIVE;
3702 break;
3703 case IMAGE_BOOT_CODE:
3704 img_optype = OPTYPE_REDBOOT;
3705 break;
3706 case IMAGE_OPTION_ROM_ISCSI:
3707 img_optype = OPTYPE_BIOS;
3708 break;
3709 case IMAGE_OPTION_ROM_PXE:
3710 img_optype = OPTYPE_PXE_BIOS;
3711 break;
3712 case IMAGE_OPTION_ROM_FCoE:
3713 img_optype = OPTYPE_FCOE_BIOS;
3714 break;
3715 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3716 img_optype = OPTYPE_ISCSI_BACKUP;
3717 break;
3718 case IMAGE_NCSI:
3719 img_optype = OPTYPE_NCSI_FW;
3720 break;
3721 default:
3722 continue;
3723 }
3724
3725 if (img_optype == OPTYPE_REDBOOT) {
3726 redboot = be_flash_redboot(adapter, fw->data,
3727 img_offset, img_size,
3728 filehdr_size + img_hdrs_size);
3729 if (!redboot)
3730 continue;
3731 }
3732
3733 p = fw->data;
3734 p += filehdr_size + img_offset + img_hdrs_size;
3735 if (p + img_size > fw->data + fw->size)
3736 return -1;
3737
3738 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3739 if (status) {
3740 dev_err(&adapter->pdev->dev,
3741 "Flashing section type %d failed.\n",
3742 fsec->fsec_entry[i].type);
3743 return status;
3744 }
3745 }
3746 return 0;
3f0d4560
AK
3747}
3748
485bf569
SN
3749static int lancer_fw_download(struct be_adapter *adapter,
3750 const struct firmware *fw)
84517482 3751{
485bf569
SN
3752#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3753#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3754 struct be_dma_mem flash_cmd;
485bf569
SN
3755 const u8 *data_ptr = NULL;
3756 u8 *dest_image_ptr = NULL;
3757 size_t image_size = 0;
3758 u32 chunk_size = 0;
3759 u32 data_written = 0;
3760 u32 offset = 0;
3761 int status = 0;
3762 u8 add_status = 0;
f67ef7ba 3763 u8 change_status;
84517482 3764
485bf569 3765 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3766 dev_err(&adapter->pdev->dev,
485bf569
SN
3767 "FW Image not properly aligned. "
3768 "Length must be 4 byte aligned.\n");
3769 status = -EINVAL;
3770 goto lancer_fw_exit;
d9efd2af
SB
3771 }
3772
485bf569
SN
3773 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3774 + LANCER_FW_DOWNLOAD_CHUNK;
3775 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3776 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3777 if (!flash_cmd.va) {
3778 status = -ENOMEM;
485bf569
SN
3779 goto lancer_fw_exit;
3780 }
84517482 3781
485bf569
SN
3782 dest_image_ptr = flash_cmd.va +
3783 sizeof(struct lancer_cmd_req_write_object);
3784 image_size = fw->size;
3785 data_ptr = fw->data;
3786
3787 while (image_size) {
3788 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3789
3790 /* Copy the image chunk content. */
3791 memcpy(dest_image_ptr, data_ptr, chunk_size);
3792
3793 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3794 chunk_size, offset,
3795 LANCER_FW_DOWNLOAD_LOCATION,
3796 &data_written, &change_status,
3797 &add_status);
485bf569
SN
3798 if (status)
3799 break;
3800
3801 offset += data_written;
3802 data_ptr += data_written;
3803 image_size -= data_written;
3804 }
3805
3806 if (!status) {
3807 /* Commit the FW written */
3808 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3809 0, offset,
3810 LANCER_FW_DOWNLOAD_LOCATION,
3811 &data_written, &change_status,
3812 &add_status);
485bf569
SN
3813 }
3814
3815 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3816 flash_cmd.dma);
3817 if (status) {
3818 dev_err(&adapter->pdev->dev,
3819 "Firmware load error. "
3820 "Status code: 0x%x Additional Status: 0x%x\n",
3821 status, add_status);
3822 goto lancer_fw_exit;
3823 }
3824
f67ef7ba 3825 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3826 dev_info(&adapter->pdev->dev,
3827 "Resetting adapter to activate new FW\n");
5c510811
SK
3828 status = lancer_physdev_ctrl(adapter,
3829 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3830 if (status) {
3831 dev_err(&adapter->pdev->dev,
3832 "Adapter busy for FW reset.\n"
3833 "New FW will not be active.\n");
3834 goto lancer_fw_exit;
3835 }
3836 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3837 dev_err(&adapter->pdev->dev,
3838 "System reboot required for new FW"
3839 " to be active\n");
3840 }
3841
485bf569
SN
3842 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3843lancer_fw_exit:
3844 return status;
3845}
3846
ca34fe38
SP
3847#define UFI_TYPE2 2
3848#define UFI_TYPE3 3
0ad3157e 3849#define UFI_TYPE3R 10
ca34fe38
SP
3850#define UFI_TYPE4 4
3851static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3852 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3853{
3854 if (fhdr == NULL)
3855 goto be_get_ufi_exit;
3856
ca34fe38
SP
3857 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3858 return UFI_TYPE4;
0ad3157e
VV
3859 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3860 if (fhdr->asic_type_rev == 0x10)
3861 return UFI_TYPE3R;
3862 else
3863 return UFI_TYPE3;
3864 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3865 return UFI_TYPE2;
773a2d7c
PR
3866
3867be_get_ufi_exit:
3868 dev_err(&adapter->pdev->dev,
3869 "UFI and Interface are not compatible for flashing\n");
3870 return -1;
3871}
3872
485bf569
SN
3873static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3874{
485bf569
SN
3875 struct flash_file_hdr_g3 *fhdr3;
3876 struct image_hdr *img_hdr_ptr = NULL;
3877 struct be_dma_mem flash_cmd;
3878 const u8 *p;
773a2d7c 3879 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3880
be716446 3881 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3882 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3883 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3884 if (!flash_cmd.va) {
3885 status = -ENOMEM;
485bf569 3886 goto be_fw_exit;
84517482
AK
3887 }
3888
773a2d7c 3889 p = fw->data;
0ad3157e 3890 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3891
0ad3157e 3892 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3893
773a2d7c
PR
3894 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3895 for (i = 0; i < num_imgs; i++) {
3896 img_hdr_ptr = (struct image_hdr *)(fw->data +
3897 (sizeof(struct flash_file_hdr_g3) +
3898 i * sizeof(struct image_hdr)));
3899 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3900 switch (ufi_type) {
3901 case UFI_TYPE4:
773a2d7c
PR
3902 status = be_flash_skyhawk(adapter, fw,
3903 &flash_cmd, num_imgs);
0ad3157e
VV
3904 break;
3905 case UFI_TYPE3R:
ca34fe38
SP
3906 status = be_flash_BEx(adapter, fw, &flash_cmd,
3907 num_imgs);
0ad3157e
VV
3908 break;
3909 case UFI_TYPE3:
3910 /* Do not flash this ufi on BE3-R cards */
3911 if (adapter->asic_rev < 0x10)
3912 status = be_flash_BEx(adapter, fw,
3913 &flash_cmd,
3914 num_imgs);
3915 else {
3916 status = -1;
3917 dev_err(&adapter->pdev->dev,
3918 "Can't load BE3 UFI on BE3R\n");
3919 }
3920 }
3f0d4560 3921 }
773a2d7c
PR
3922 }
3923
ca34fe38
SP
3924 if (ufi_type == UFI_TYPE2)
3925 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3926 else if (ufi_type == -1)
3f0d4560 3927 status = -1;
84517482 3928
2b7bcebf
IV
3929 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3930 flash_cmd.dma);
84517482
AK
3931 if (status) {
3932 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3933 goto be_fw_exit;
84517482
AK
3934 }
3935
af901ca1 3936 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3937
485bf569
SN
3938be_fw_exit:
3939 return status;
3940}
3941
3942int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3943{
3944 const struct firmware *fw;
3945 int status;
3946
3947 if (!netif_running(adapter->netdev)) {
3948 dev_err(&adapter->pdev->dev,
3949 "Firmware load not allowed (interface is down)\n");
3950 return -1;
3951 }
3952
3953 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3954 if (status)
3955 goto fw_exit;
3956
3957 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3958
3959 if (lancer_chip(adapter))
3960 status = lancer_fw_download(adapter, fw);
3961 else
3962 status = be_fw_download(adapter, fw);
3963
eeb65ced
SK
3964 if (!status)
3965 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3966 adapter->fw_on_flash);
3967
84517482
AK
3968fw_exit:
3969 release_firmware(fw);
3970 return status;
3971}
3972
a77dcb8c
AK
3973static int be_ndo_bridge_setlink(struct net_device *dev,
3974 struct nlmsghdr *nlh)
3975{
3976 struct be_adapter *adapter = netdev_priv(dev);
3977 struct nlattr *attr, *br_spec;
3978 int rem;
3979 int status = 0;
3980 u16 mode = 0;
3981
3982 if (!sriov_enabled(adapter))
3983 return -EOPNOTSUPP;
3984
3985 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3986
3987 nla_for_each_nested(attr, br_spec, rem) {
3988 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3989 continue;
3990
3991 mode = nla_get_u16(attr);
3992 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3993 return -EINVAL;
3994
3995 status = be_cmd_set_hsw_config(adapter, 0, 0,
3996 adapter->if_handle,
3997 mode == BRIDGE_MODE_VEPA ?
3998 PORT_FWD_TYPE_VEPA :
3999 PORT_FWD_TYPE_VEB);
4000 if (status)
4001 goto err;
4002
4003 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4004 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4005
4006 return status;
4007 }
4008err:
4009 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4010 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4011
4012 return status;
4013}
4014
4015static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4016 struct net_device *dev,
4017 u32 filter_mask)
4018{
4019 struct be_adapter *adapter = netdev_priv(dev);
4020 int status = 0;
4021 u8 hsw_mode;
4022
4023 if (!sriov_enabled(adapter))
4024 return 0;
4025
4026 /* BE and Lancer chips support VEB mode only */
4027 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4028 hsw_mode = PORT_FWD_TYPE_VEB;
4029 } else {
4030 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4031 adapter->if_handle, &hsw_mode);
4032 if (status)
4033 return 0;
4034 }
4035
4036 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4037 hsw_mode == PORT_FWD_TYPE_VEPA ?
4038 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4039}
4040
e5686ad8 4041static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4042 .ndo_open = be_open,
4043 .ndo_stop = be_close,
4044 .ndo_start_xmit = be_xmit,
a54769f5 4045 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4046 .ndo_set_mac_address = be_mac_addr_set,
4047 .ndo_change_mtu = be_change_mtu,
ab1594e9 4048 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4049 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4050 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4051 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4052 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4053 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4054 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
4055 .ndo_get_vf_config = be_get_vf_config,
4056#ifdef CONFIG_NET_POLL_CONTROLLER
4057 .ndo_poll_controller = be_netpoll,
4058#endif
a77dcb8c
AK
4059 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4060 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0
SP
4061#ifdef CONFIG_NET_RX_BUSY_POLL
4062 .ndo_busy_poll = be_busy_poll
4063#endif
6b7c5b94
SP
4064};
4065
4066static void be_netdev_init(struct net_device *netdev)
4067{
4068 struct be_adapter *adapter = netdev_priv(netdev);
4069
6332c8d3 4070 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4071 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4072 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4073 if (be_multi_rxq(adapter))
4074 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4075
4076 netdev->features |= netdev->hw_features |
f646968f 4077 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4078
eb8a50d9 4079 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4080 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4081
fbc13f01
AK
4082 netdev->priv_flags |= IFF_UNICAST_FLT;
4083
6b7c5b94
SP
4084 netdev->flags |= IFF_MULTICAST;
4085
b7e5887e 4086 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4087
10ef9ab4 4088 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4089
4090 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4091}
4092
4093static void be_unmap_pci_bars(struct be_adapter *adapter)
4094{
c5b3ad4c
SP
4095 if (adapter->csr)
4096 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4097 if (adapter->db)
ce66f781 4098 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4099}
4100
ce66f781
SP
/* BAR index holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on other physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4108
4109static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4110{
dbf0f2a7 4111 if (skyhawk_chip(adapter)) {
ce66f781
SP
4112 adapter->roce_db.size = 4096;
4113 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4114 db_bar(adapter));
4115 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4116 db_bar(adapter));
4117 }
045508a8 4118 return 0;
6b7c5b94
SP
4119}
4120
4121static int be_map_pci_bars(struct be_adapter *adapter)
4122{
4123 u8 __iomem *addr;
fe6d2a38 4124
c5b3ad4c
SP
4125 if (BEx_chip(adapter) && be_physfn(adapter)) {
4126 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4127 if (adapter->csr == NULL)
4128 return -ENOMEM;
4129 }
4130
ce66f781 4131 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4132 if (addr == NULL)
4133 goto pci_map_err;
ba343c77 4134 adapter->db = addr;
ce66f781
SP
4135
4136 be_roce_map_pci_bars(adapter);
6b7c5b94 4137 return 0;
ce66f781 4138
6b7c5b94
SP
4139pci_map_err:
4140 be_unmap_pci_bars(adapter);
4141 return -ENOMEM;
4142}
4143
6b7c5b94
SP
4144static void be_ctrl_cleanup(struct be_adapter *adapter)
4145{
8788fdc2 4146 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4147
4148 be_unmap_pci_bars(adapter);
4149
4150 if (mem->va)
2b7bcebf
IV
4151 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4152 mem->dma);
e7b909a6 4153
5b8821b7 4154 mem = &adapter->rx_filter;
e7b909a6 4155 if (mem->va)
2b7bcebf
IV
4156 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4157 mem->dma);
6b7c5b94
SP
4158}
4159
6b7c5b94
SP
4160static int be_ctrl_init(struct be_adapter *adapter)
4161{
8788fdc2
SP
4162 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4163 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4164 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4165 u32 sli_intf;
6b7c5b94 4166 int status;
6b7c5b94 4167
ce66f781
SP
4168 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4169 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4170 SLI_INTF_FAMILY_SHIFT;
4171 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4172
6b7c5b94
SP
4173 status = be_map_pci_bars(adapter);
4174 if (status)
e7b909a6 4175 goto done;
6b7c5b94
SP
4176
4177 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4178 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4179 mbox_mem_alloc->size,
4180 &mbox_mem_alloc->dma,
4181 GFP_KERNEL);
6b7c5b94 4182 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4183 status = -ENOMEM;
4184 goto unmap_pci_bars;
6b7c5b94
SP
4185 }
4186 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4187 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4188 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4189 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4190
5b8821b7 4191 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4192 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4193 rx_filter->size, &rx_filter->dma,
4194 GFP_KERNEL);
5b8821b7 4195 if (rx_filter->va == NULL) {
e7b909a6
SP
4196 status = -ENOMEM;
4197 goto free_mbox;
4198 }
1f9061d2 4199
2984961c 4200 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4201 spin_lock_init(&adapter->mcc_lock);
4202 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4203
5eeff635 4204 init_completion(&adapter->et_cmd_compl);
cf588477 4205 pci_save_state(adapter->pdev);
6b7c5b94 4206 return 0;
e7b909a6
SP
4207
4208free_mbox:
2b7bcebf
IV
4209 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4210 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4211
4212unmap_pci_bars:
4213 be_unmap_pci_bars(adapter);
4214
4215done:
4216 return status;
6b7c5b94
SP
4217}
4218
4219static void be_stats_cleanup(struct be_adapter *adapter)
4220{
3abcdeda 4221 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4222
4223 if (cmd->va)
2b7bcebf
IV
4224 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4225 cmd->va, cmd->dma);
6b7c5b94
SP
4226}
4227
4228static int be_stats_init(struct be_adapter *adapter)
4229{
3abcdeda 4230 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4231
ca34fe38
SP
4232 if (lancer_chip(adapter))
4233 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4234 else if (BE2_chip(adapter))
89a88ab8 4235 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4236 else if (BE3_chip(adapter))
ca34fe38 4237 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4238 else
4239 /* ALL non-BE ASICs */
4240 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4241
ede23fa8
JP
4242 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4243 GFP_KERNEL);
6b7c5b94
SP
4244 if (cmd->va == NULL)
4245 return -1;
4246 return 0;
4247}
4248
3bc6b06c 4249static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4250{
4251 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4252
6b7c5b94
SP
4253 if (!adapter)
4254 return;
4255
045508a8 4256 be_roce_dev_remove(adapter);
8cef7a78 4257 be_intr_set(adapter, false);
045508a8 4258
f67ef7ba
PR
4259 cancel_delayed_work_sync(&adapter->func_recovery_work);
4260
6b7c5b94
SP
4261 unregister_netdev(adapter->netdev);
4262
5fb379ee
SP
4263 be_clear(adapter);
4264
bf99e50d
PR
4265 /* tell fw we're done with firing cmds */
4266 be_cmd_fw_clean(adapter);
4267
6b7c5b94
SP
4268 be_stats_cleanup(adapter);
4269
4270 be_ctrl_cleanup(adapter);
4271
d6b6d987
SP
4272 pci_disable_pcie_error_reporting(pdev);
4273
6b7c5b94
SP
4274 pci_release_regions(pdev);
4275 pci_disable_device(pdev);
4276
4277 free_netdev(adapter->netdev);
4278}
4279
39f1d94d 4280static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4281{
baaa08d1 4282 int status, level;
6b7c5b94 4283
9e1453c5
AK
4284 status = be_cmd_get_cntl_attributes(adapter);
4285 if (status)
4286 return status;
4287
7aeb2156
PR
4288 /* Must be a power of 2 or else MODULO will BUG_ON */
4289 adapter->be_get_temp_freq = 64;
4290
baaa08d1
VV
4291 if (BEx_chip(adapter)) {
4292 level = be_cmd_get_fw_log_level(adapter);
4293 adapter->msg_enable =
4294 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4295 }
941a77d5 4296
92bf14ab 4297 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4298 return 0;
6b7c5b94
SP
4299}
4300
f67ef7ba 4301static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4302{
01e5b2c4 4303 struct device *dev = &adapter->pdev->dev;
d8110f62 4304 int status;
d8110f62 4305
f67ef7ba
PR
4306 status = lancer_test_and_set_rdy_state(adapter);
4307 if (status)
4308 goto err;
d8110f62 4309
f67ef7ba
PR
4310 if (netif_running(adapter->netdev))
4311 be_close(adapter->netdev);
d8110f62 4312
f67ef7ba
PR
4313 be_clear(adapter);
4314
01e5b2c4 4315 be_clear_all_error(adapter);
f67ef7ba
PR
4316
4317 status = be_setup(adapter);
4318 if (status)
4319 goto err;
d8110f62 4320
f67ef7ba
PR
4321 if (netif_running(adapter->netdev)) {
4322 status = be_open(adapter->netdev);
d8110f62
PR
4323 if (status)
4324 goto err;
f67ef7ba 4325 }
d8110f62 4326
4bebb56a 4327 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4328 return 0;
4329err:
01e5b2c4
SK
4330 if (status == -EAGAIN)
4331 dev_err(dev, "Waiting for resource provisioning\n");
4332 else
4bebb56a 4333 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4334
f67ef7ba
PR
4335 return status;
4336}
4337
4338static void be_func_recovery_task(struct work_struct *work)
4339{
4340 struct be_adapter *adapter =
4341 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4342 int status = 0;
d8110f62 4343
f67ef7ba 4344 be_detect_error(adapter);
d8110f62 4345
f67ef7ba 4346 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4347
f67ef7ba
PR
4348 rtnl_lock();
4349 netif_device_detach(adapter->netdev);
4350 rtnl_unlock();
d8110f62 4351
f67ef7ba 4352 status = lancer_recover_func(adapter);
f67ef7ba
PR
4353 if (!status)
4354 netif_device_attach(adapter->netdev);
d8110f62 4355 }
f67ef7ba 4356
01e5b2c4
SK
4357 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4358 * no need to attempt further recovery.
4359 */
4360 if (!status || status == -EAGAIN)
4361 schedule_delayed_work(&adapter->func_recovery_work,
4362 msecs_to_jiffies(1000));
d8110f62
PR
4363}
4364
4365static void be_worker(struct work_struct *work)
4366{
4367 struct be_adapter *adapter =
4368 container_of(work, struct be_adapter, work.work);
4369 struct be_rx_obj *rxo;
4370 int i;
4371
d8110f62
PR
4372 /* when interrupts are not yet enabled, just reap any pending
4373 * mcc completions */
4374 if (!netif_running(adapter->netdev)) {
072a9c48 4375 local_bh_disable();
10ef9ab4 4376 be_process_mcc(adapter);
072a9c48 4377 local_bh_enable();
d8110f62
PR
4378 goto reschedule;
4379 }
4380
4381 if (!adapter->stats_cmd_sent) {
4382 if (lancer_chip(adapter))
4383 lancer_cmd_get_pport_stats(adapter,
4384 &adapter->stats_cmd);
4385 else
4386 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4387 }
4388
d696b5e2
VV
4389 if (be_physfn(adapter) &&
4390 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4391 be_cmd_get_die_temperature(adapter);
4392
d8110f62 4393 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4394 /* Replenish RX-queues starved due to memory
4395 * allocation failures.
4396 */
4397 if (rxo->rx_post_starved)
d8110f62 4398 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4399 }
4400
2632bafd 4401 be_eqd_update(adapter);
10ef9ab4 4402
d8110f62
PR
4403reschedule:
4404 adapter->work_counter++;
4405 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4406}
4407
257a3feb 4408/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4409static bool be_reset_required(struct be_adapter *adapter)
4410{
257a3feb 4411 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4412}
4413
d379142b
SP
4414static char *mc_name(struct be_adapter *adapter)
4415{
4416 if (adapter->function_mode & FLEX10_MODE)
4417 return "FLEX10";
4418 else if (adapter->function_mode & VNIC_MODE)
4419 return "vNIC";
4420 else if (adapter->function_mode & UMC_ENABLED)
4421 return "UMC";
4422 else
4423 return "";
4424}
4425
/* "PF" or "VF" for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4430
1dd06ae8 4431static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4432{
4433 int status = 0;
4434 struct be_adapter *adapter;
4435 struct net_device *netdev;
b4e32a71 4436 char port_name;
6b7c5b94
SP
4437
4438 status = pci_enable_device(pdev);
4439 if (status)
4440 goto do_none;
4441
4442 status = pci_request_regions(pdev, DRV_NAME);
4443 if (status)
4444 goto disable_dev;
4445 pci_set_master(pdev);
4446
7f640062 4447 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4448 if (netdev == NULL) {
4449 status = -ENOMEM;
4450 goto rel_reg;
4451 }
4452 adapter = netdev_priv(netdev);
4453 adapter->pdev = pdev;
4454 pci_set_drvdata(pdev, adapter);
4455 adapter->netdev = netdev;
2243e2e9 4456 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4457
4c15c243 4458 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4459 if (!status) {
4460 netdev->features |= NETIF_F_HIGHDMA;
4461 } else {
4c15c243 4462 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4463 if (status) {
4464 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4465 goto free_netdev;
4466 }
4467 }
4468
ea58c180
AK
4469 if (be_physfn(adapter)) {
4470 status = pci_enable_pcie_error_reporting(pdev);
4471 if (!status)
4472 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4473 }
d6b6d987 4474
6b7c5b94
SP
4475 status = be_ctrl_init(adapter);
4476 if (status)
39f1d94d 4477 goto free_netdev;
6b7c5b94 4478
2243e2e9 4479 /* sync up with fw's ready state */
ba343c77 4480 if (be_physfn(adapter)) {
bf99e50d 4481 status = be_fw_wait_ready(adapter);
ba343c77
SB
4482 if (status)
4483 goto ctrl_clean;
ba343c77 4484 }
6b7c5b94 4485
39f1d94d
SP
4486 if (be_reset_required(adapter)) {
4487 status = be_cmd_reset_function(adapter);
4488 if (status)
4489 goto ctrl_clean;
556ae191 4490
2d177be8
KA
4491 /* Wait for interrupts to quiesce after an FLR */
4492 msleep(100);
4493 }
8cef7a78
SK
4494
4495 /* Allow interrupts for other ULPs running on NIC function */
4496 be_intr_set(adapter, true);
10ef9ab4 4497
2d177be8
KA
4498 /* tell fw we're ready to fire cmds */
4499 status = be_cmd_fw_init(adapter);
4500 if (status)
4501 goto ctrl_clean;
4502
2243e2e9
SP
4503 status = be_stats_init(adapter);
4504 if (status)
4505 goto ctrl_clean;
4506
39f1d94d 4507 status = be_get_initial_config(adapter);
6b7c5b94
SP
4508 if (status)
4509 goto stats_clean;
6b7c5b94
SP
4510
4511 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4512 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4513 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4514
5fb379ee
SP
4515 status = be_setup(adapter);
4516 if (status)
55f5c3c5 4517 goto stats_clean;
2243e2e9 4518
3abcdeda 4519 be_netdev_init(netdev);
6b7c5b94
SP
4520 status = register_netdev(netdev);
4521 if (status != 0)
5fb379ee 4522 goto unsetup;
6b7c5b94 4523
045508a8
PP
4524 be_roce_dev_add(adapter);
4525
f67ef7ba
PR
4526 schedule_delayed_work(&adapter->func_recovery_work,
4527 msecs_to_jiffies(1000));
b4e32a71
PR
4528
4529 be_cmd_query_port_name(adapter, &port_name);
4530
d379142b
SP
4531 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4532 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4533
6b7c5b94
SP
4534 return 0;
4535
5fb379ee
SP
4536unsetup:
4537 be_clear(adapter);
6b7c5b94
SP
4538stats_clean:
4539 be_stats_cleanup(adapter);
4540ctrl_clean:
4541 be_ctrl_cleanup(adapter);
f9449ab7 4542free_netdev:
fe6d2a38 4543 free_netdev(netdev);
6b7c5b94
SP
4544rel_reg:
4545 pci_release_regions(pdev);
4546disable_dev:
4547 pci_disable_device(pdev);
4548do_none:
c4ca2374 4549 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4550 return status;
4551}
4552
4553static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4554{
4555 struct be_adapter *adapter = pci_get_drvdata(pdev);
4556 struct net_device *netdev = adapter->netdev;
4557
76a9e08e 4558 if (adapter->wol_en)
71d8d1b5
AK
4559 be_setup_wol(adapter, true);
4560
d4360d6f 4561 be_intr_set(adapter, false);
f67ef7ba
PR
4562 cancel_delayed_work_sync(&adapter->func_recovery_work);
4563
6b7c5b94
SP
4564 netif_device_detach(netdev);
4565 if (netif_running(netdev)) {
4566 rtnl_lock();
4567 be_close(netdev);
4568 rtnl_unlock();
4569 }
9b0365f1 4570 be_clear(adapter);
6b7c5b94
SP
4571
4572 pci_save_state(pdev);
4573 pci_disable_device(pdev);
4574 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4575 return 0;
4576}
4577
4578static int be_resume(struct pci_dev *pdev)
4579{
4580 int status = 0;
4581 struct be_adapter *adapter = pci_get_drvdata(pdev);
4582 struct net_device *netdev = adapter->netdev;
4583
4584 netif_device_detach(netdev);
4585
4586 status = pci_enable_device(pdev);
4587 if (status)
4588 return status;
4589
1ca01512 4590 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4591 pci_restore_state(pdev);
4592
dd5746bf
SB
4593 status = be_fw_wait_ready(adapter);
4594 if (status)
4595 return status;
4596
d4360d6f 4597 be_intr_set(adapter, true);
2243e2e9
SP
4598 /* tell fw we're ready to fire cmds */
4599 status = be_cmd_fw_init(adapter);
4600 if (status)
4601 return status;
4602
9b0365f1 4603 be_setup(adapter);
6b7c5b94
SP
4604 if (netif_running(netdev)) {
4605 rtnl_lock();
4606 be_open(netdev);
4607 rtnl_unlock();
4608 }
f67ef7ba
PR
4609
4610 schedule_delayed_work(&adapter->func_recovery_work,
4611 msecs_to_jiffies(1000));
6b7c5b94 4612 netif_device_attach(netdev);
71d8d1b5 4613
76a9e08e 4614 if (adapter->wol_en)
71d8d1b5 4615 be_setup_wol(adapter, false);
a4ca055f 4616
6b7c5b94
SP
4617 return 0;
4618}
4619
82456b03
SP
4620/*
4621 * An FLR will stop BE from DMAing any data.
4622 */
4623static void be_shutdown(struct pci_dev *pdev)
4624{
4625 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4626
2d5d4154
AK
4627 if (!adapter)
4628 return;
82456b03 4629
0f4a6828 4630 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4631 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4632
2d5d4154 4633 netif_device_detach(adapter->netdev);
82456b03 4634
57841869
AK
4635 be_cmd_reset_function(adapter);
4636
82456b03 4637 pci_disable_device(pdev);
82456b03
SP
4638}
4639
cf588477
SP
4640static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4641 pci_channel_state_t state)
4642{
4643 struct be_adapter *adapter = pci_get_drvdata(pdev);
4644 struct net_device *netdev = adapter->netdev;
4645
4646 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4647
01e5b2c4
SK
4648 if (!adapter->eeh_error) {
4649 adapter->eeh_error = true;
cf588477 4650
01e5b2c4 4651 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4652
cf588477 4653 rtnl_lock();
01e5b2c4
SK
4654 netif_device_detach(netdev);
4655 if (netif_running(netdev))
4656 be_close(netdev);
cf588477 4657 rtnl_unlock();
01e5b2c4
SK
4658
4659 be_clear(adapter);
cf588477 4660 }
cf588477
SP
4661
4662 if (state == pci_channel_io_perm_failure)
4663 return PCI_ERS_RESULT_DISCONNECT;
4664
4665 pci_disable_device(pdev);
4666
eeb7fc7b
SK
4667 /* The error could cause the FW to trigger a flash debug dump.
4668 * Resetting the card while flash dump is in progress
c8a54163
PR
4669 * can cause it not to recover; wait for it to finish.
4670 * Wait only for first function as it is needed only once per
4671 * adapter.
eeb7fc7b 4672 */
c8a54163
PR
4673 if (pdev->devfn == 0)
4674 ssleep(30);
4675
cf588477
SP
4676 return PCI_ERS_RESULT_NEED_RESET;
4677}
4678
4679static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4680{
4681 struct be_adapter *adapter = pci_get_drvdata(pdev);
4682 int status;
4683
4684 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4685
4686 status = pci_enable_device(pdev);
4687 if (status)
4688 return PCI_ERS_RESULT_DISCONNECT;
4689
4690 pci_set_master(pdev);
1ca01512 4691 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4692 pci_restore_state(pdev);
4693
4694 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4695 dev_info(&adapter->pdev->dev,
4696 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4697 status = be_fw_wait_ready(adapter);
cf588477
SP
4698 if (status)
4699 return PCI_ERS_RESULT_DISCONNECT;
4700
d6b6d987 4701 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4702 be_clear_all_error(adapter);
cf588477
SP
4703 return PCI_ERS_RESULT_RECOVERED;
4704}
4705
4706static void be_eeh_resume(struct pci_dev *pdev)
4707{
4708 int status = 0;
4709 struct be_adapter *adapter = pci_get_drvdata(pdev);
4710 struct net_device *netdev = adapter->netdev;
4711
4712 dev_info(&adapter->pdev->dev, "EEH resume\n");
4713
4714 pci_save_state(pdev);
4715
2d177be8 4716 status = be_cmd_reset_function(adapter);
cf588477
SP
4717 if (status)
4718 goto err;
4719
2d177be8
KA
4720 /* tell fw we're ready to fire cmds */
4721 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4722 if (status)
4723 goto err;
4724
cf588477
SP
4725 status = be_setup(adapter);
4726 if (status)
4727 goto err;
4728
4729 if (netif_running(netdev)) {
4730 status = be_open(netdev);
4731 if (status)
4732 goto err;
4733 }
f67ef7ba
PR
4734
4735 schedule_delayed_work(&adapter->func_recovery_work,
4736 msecs_to_jiffies(1000));
cf588477
SP
4737 netif_device_attach(netdev);
4738 return;
4739err:
4740 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4741}
4742
3646f0e5 4743static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4744 .error_detected = be_eeh_err_detected,
4745 .slot_reset = be_eeh_reset,
4746 .resume = be_eeh_resume,
4747};
4748
6b7c5b94
SP
4749static struct pci_driver be_driver = {
4750 .name = DRV_NAME,
4751 .id_table = be_dev_ids,
4752 .probe = be_probe,
4753 .remove = be_remove,
4754 .suspend = be_suspend,
cf588477 4755 .resume = be_resume,
82456b03 4756 .shutdown = be_shutdown,
cf588477 4757 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4758};
4759
4760static int __init be_init_module(void)
4761{
8e95a202
JP
4762 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4763 rx_frag_size != 2048) {
6b7c5b94
SP
4764 printk(KERN_WARNING DRV_NAME
4765 " : Module param rx_frag_size must be 2048/4096/8192."
4766 " Using 2048\n");
4767 rx_frag_size = 2048;
4768 }
6b7c5b94
SP
4769
4770 return pci_register_driver(&be_driver);
4771}
4772module_init(be_init_module);
4773
4774static void __exit be_exit_module(void)
4775{
4776 pci_unregister_driver(&be_driver);
4777}
4778module_exit(be_exit_module);
This page took 1.196193 seconds and 5 git commands to generate.