be2net: Fix be_vlan_add/rem_vid() routines
drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

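/* Doorbell helpers: each queue is kicked by packing its id and a count into
 * the corresponding doorbell register in the memory-mapped BAR (adapter->db).
 * For example, posting 8 RX buffers to queue 5 below writes
 * (5 & DB_RQ_RING_ID_MASK) | (8 << DB_RQ_NUM_POSTED_SHIFT) to DB_RQ_OFFSET.
 * The wmb() ensures the queue entries are visible in memory before the
 * doorbell write triggers a hardware fetch.
 */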
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
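/* Worked example: if *acc is 0x0001fff0 (one prior wrap, last val 0xfff0)
 * and the 16-bit HW counter now reads 0x0010, then val < lo(*acc) detects
 * the wrap and the accumulator becomes 0x00010000 + 0x0010 + 65536 =
 * 0x00020010, i.e. the high half counts wrap-arounds.
 */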

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* The erx HW counter wraps around after 65535; the driver
		 * accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

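/* Aggregates the per-queue SW counters into rtnl_link_stats64. The
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() loop re-reads a
 * queue's counters until it gets a consistent snapshot, since they are
 * updated concurrently from the datapath.
 */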
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
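/* Example: an skb with linear data and two page frags needs 1 + 2 data WRBs
 * plus the header WRB, i.e. cnt = 4; that is already even, so no dummy WRB.
 * With one frag (cnt = 3) a dummy WRB is added on non-Lancer chips to keep
 * the count even.
 */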

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

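/* Maps the skb's linear data and page frags for DMA and fills one WRB per
 * fragment, followed by an optional dummy WRB to keep the WRB count even.
 * The header WRB at the original queue head is filled last, once the total
 * copied length is known. On a DMA mapping error everything mapped so far
 * is unwound and 0 is returned.
 */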
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

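/* Inserts the VLAN tag (and the QnQ outer tag, if configured) into the
 * packet data itself; used by the workarounds below when HW VLAN tagging
 * has to be skipped.
 */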
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

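/* Adaptive interrupt coalescing: recompute each EQ's delay from the RX+TX
 * packet rate seen since the last sample. eqd = (pps / 15000) << 2, so e.g.
 * ~150K pkts/s yields eqd = 40; values below 8 are forced to 0 and the
 * result is clamped to [min_eqd, max_eqd] before being written to HW.
 */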
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

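/* RX buffers are carved out of pages that may be shared by more than one
 * receive fragment; get_rx_page_info() pops the fragment at the queue tail
 * and unmaps the page for DMA only when its last user (last_page_user) is
 * being consumed.
 */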
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}

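/* Return the next valid RX completion on this CQ, converted into the
 * chip-agnostic be_rx_compl_info form, or NULL when none is pending.
 * Also applies the vlanf quirks (FLEX10 vtm check, pvid filtering).
 */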
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

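/* Return the next valid TX completion from tx_cq (byte-swapped to CPU
 * order) and clear its valid bit so it is consumed only once; returns
 * NULL when none is pending.
 */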
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

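/* Unmap and free the skb whose wrbs end at last_index; returns the
 * number of wrbs freed for this skb, including the header wrb.
 */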
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

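/* Drain the TX CQs of all queues and then free any posted skbs for which
 * completions will never arrive; used on the close path after TX has
 * been disabled.
 */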
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

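/* Create one EQ (with its NAPI context) per interrupt vector, capped by
 * the configured number of queues.
 */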
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}

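/* Legacy INTx ISR: used only when MSI-x could not be enabled; all events
 * are delivered on the first EQ in this mode.
 */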
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

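/* Reap up to budget RX completions from this RX object's CQ and feed
 * each packet to the GRO or non-GRO receive path as appropriate.
 */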
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

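/* Reap up to budget TX completions from this TX object's CQ; returns
 * true when the CQ was fully drained within the budget.
 */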
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

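/* NAPI poll handler: services the TXQs and RXQs mapped to this EQ and
 * re-arms the EQ only when all the work fit within the budget.
 */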
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if it's a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

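/* Enable MSI-x, retrying with the vector count reported by the kernel
 * when the full request cannot be satisfied; the vectors are then split
 * between NIC and RoCE if RoCE is supported.
 */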
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

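/* Close path: quiesce NAPI and busy-poll, drain pending TX completions,
 * destroy the RX queues, delete the programmed uc-macs and release IRQs.
 */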
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

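/* Create the default RXQ first (as FW expects), then the RSS rings,
 * program the RSS indirection table and post the initial RX buffers.
 */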
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		adapter->rss_flags = RSS_ENABLE_NONE;
	}

	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
			       128);
	if (rc) {
		adapter->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

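/* Configure SR-IOV VFs: reuse the interfaces/MACs when VFs were left
 * enabled by a previous driver load, else create them afresh, grant the
 * filter-management privilege where possible, and enable SR-IOV.
 */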
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* On BE2/BE3, FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

68d7bdcb
SP
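/* Re-create the data path with a new queue configuration. The device is
 * quiesced first; the MSI-x table is re-programmed only when no vectors are
 * shared with RoCE, since re-programming it would pull vectors out from
 * under the RoCE driver.
 */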
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

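/* One-time (and post-reset) bring-up: query resource limits, enable MSI-x,
 * create the interface and its queues, program the MAC, VLAN and RX-mode
 * state, apply flow-control settings and, if requested, enable SR-IOV.
 */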
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags &= be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* Update redboot only if the CRC does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

3485static bool phy_flashing_required(struct be_adapter *adapter)
3486{
42f11cf2
AK
3487 return (adapter->phy.phy_type == TN_8022 &&
3488 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3489}
3490
c165541e
PR
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

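/* Write one firmware component to the flash ROM in 32KB chunks. Every chunk
 * except the last is sent with a SAVE opcode and the final chunk with a
 * FLASH opcode, presumably letting FW stage the full image before
 * committing the burn.
 */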
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}

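/* The gen2/gen3 tables below map each component type (FW, redboot, option
 * ROMs, NCSI, PHY FW) to its fixed offset and maximum size in the flash
 * layout; only components present in the UFI's section directory are
 * flashed.
 */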
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

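/* Flash a Skyhawk UFI image. Unlike the BEx path there is no fixed layout
 * table; the section directory in the image itself supplies each
 * component's offset and size, and only recognized component types are
 * flashed.
 */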
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			/* The section type is little-endian on the wire;
			 * convert before printing.
			 */
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

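/* Lancer firmware is downloaded as a single object: the image is streamed
 * to the "/prg" location in 32KB chunks via WRITE_OBJECT, then committed
 * with a zero-length write. Depending on what FW reports, activation needs
 * either an adapter reset (issued here) or a full system reboot.
 */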
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset. New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

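/* UFI (Unified Firmware Image) compatibility: the UFI type is derived from
 * the file header's build string and must match the ASIC family --
 * type 2 for BE2, type 3 for BE3, type 3R for BE3-R (asic rev 0x10) and
 * type 4 for Skyhawk.
 */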
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

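/* Entry point for user-initiated firmware flashing, typically reached via
 * "ethtool -f". The interface must be running before a flash is allowed.
 */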
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}

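/* ndo_bridge_setlink: switch the embedded port between VEB mode (frames
 * between functions are switched inside the adapter) and VEPA mode (all
 * frames are forwarded to the external bridge, which may reflect them
 * back). Only meaningful when SR-IOV is enabled.
 */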
static int be_ndo_bridge_setlink(struct net_device *dev,
				 struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* nlmsg_find_attr() may return NULL; guard before iterating */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

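/* Map the PCI BARs and allocate the mailbox and RX-filter DMA buffers. The
 * mailbox used for the bootstrap MBOX command channel must be 16-byte
 * aligned, so a slightly oversized buffer is allocated and both the VA and
 * the DMA address are aligned with PTR_ALIGN.
 */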
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size,
				      &cmd->dma, GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

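/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * still disabled, refresh HW stats and die temperature, replenish RX queues
 * that starved on memory allocation, and adapt EQ interrupt delays to the
 * observed load.
 */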
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) == 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

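/* PCI EEH (Extended Error Handling) callbacks: on a detected channel error
 * the device is detached and torn down; after the slot reset, FW readiness
 * is re-checked and be_eeh_resume() rebuilds the function much like probe
 * does.
 */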
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);