drivers:net: Convert dma_alloc_coherent(...__GFP_ZERO) to dma_zalloc_coherent
[deliverable/linux.git] / drivers / net / ethernet / emulex / benet / be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

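/* Note on the allocation in be_queue_alloc() above: it used to be
 * open-coded as
 *	dma_alloc_coherent(dev, size, &dma, GFP_KERNEL | __GFP_ZERO);
 * dma_zalloc_coherent() wraps exactly that allocate-and-zero pattern,
 * which is the conversion this patch performs.
 */
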
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

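/* The notify helpers below build a 32-bit doorbell word and write it to
 * the adapter's doorbell BAR; the wmb() ensures the ring entries written
 * by the CPU are visible in memory before the doorbell reaches the device.
 */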
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

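/* For the EQ/CQ doorbells below, num_popped credits consumed entries back
 * to the hardware and the (re)arm bit re-enables event delivery.
 */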
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
				pport_stats->rx_address_filtered +
				pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

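/* The erx drop counter in hardware is only 16 bits wide. The helper below
 * folds it into a 32-bit software accumulator: the low half mirrors the HW
 * counter and 65536 is added whenever the HW value wraps around.
 */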
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

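/* be_get_stats64() below samples the per-queue 64-bit counters inside
 * u64_stats fetch/retry loops so a 32-bit reader never sees a torn update
 * from the softirq-context writers.
 */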
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

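/* Example for wrb_cnt_for_skb() above: an skb with linear data and two
 * page frags needs 1 + 2 + 1 (hdr wrb) = 4 WRBs; that count is even, so
 * no dummy WRB would be added on BE2/BE3.
 */
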
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

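/* wrb_fill() above splits the 64-bit DMA address into the two 32-bit
 * words of the WRB; callers convert the filled WRB to little-endian with
 * be_dws_cpu_to_le() before it is handed to the hardware.
 */
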
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

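/* On a DMA mapping failure, make_tx_wrbs() rewinds txq->head to map_head
 * and unmaps every WRB filled so far; only the first one can be a
 * dma_map_single() mapping (the skb linear data), so map_single is
 * cleared after the first pass of the unwind loop.
 */
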
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

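/* Adaptive EQ delay in be_eqd_update() above: once a second the RX rate
 * is sampled and the delay is derived as (rx_pps / 110000) << 3, clamped
 * to the EQ's [min_eqd, max_eqd] range; values below 10 disable
 * moderation entirely to keep latency low at small packet rates.
 */
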
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

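/* be_parse_rx_compl_v0() below handles the older completion layout; it
 * differs from the v1 version only in the AMAP field offsets and in
 * additionally extracting ip_frag, which be_rx_compl_get() uses to ignore
 * the L4 checksum of IP fragments.
 */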
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}

1607static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1608{
1609 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1610 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1611 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1612
2e588f84
SP
 1613	/* For checking the valid bit it is OK to use either definition as the
 1614	 * valid bit is at the same position in both v0 and v1 Rx compl */
1615 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1616 return NULL;
6b7c5b94 1617
2e588f84
SP
1618 rmb();
1619 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1620
2e588f84 1621 if (adapter->be3_native)
10ef9ab4 1622 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1623 else
10ef9ab4 1624 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1625
e38b1706
SK
1626 if (rxcp->ip_frag)
1627 rxcp->l4_csum = 0;
1628
15d72184
SP
1629 if (rxcp->vlanf) {
 1630		/* vlanf could be wrongly set in some cards.
 1631		 * Ignore it if vtm is not set */
752961a1 1632 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1633 rxcp->vlanf = 0;
6b7c5b94 1634
15d72184 1635 if (!lancer_chip(adapter))
3c709f8f 1636 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1637
939cf306 1638 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1639 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1640 rxcp->vlanf = 0;
1641 }
2e588f84
SP
1642
 1643	/* As the compl has been parsed, reset it; we won't touch it again */
1644 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1645
3abcdeda 1646 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1647 return rxcp;
1648}
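
The function above shows the standard valid-bit protocol for consuming completions the NIC DMAs into host memory: test the valid dword, issue a read barrier, parse, clear the valid bit, advance the tail. A minimal sketch of that idiom with a hypothetical entry layout (the bit position and types below are illustrative, not the driver's):

struct hw_compl {
	u32 dw[4];			/* assume dw[3] bit 0 is the valid bit */
};

/* Return the next completion the HW has written, or NULL if none */
static struct hw_compl *ring_next_compl(struct hw_compl *ring,
					u16 *tail, u16 len)
{
	struct hw_compl *c = &ring[*tail];

	if (!(c->dw[3] & 1))		/* slot not yet written by HW */
		return NULL;
	rmb();				/* order valid-bit read before payload reads */
	c->dw[3] &= ~1;			/* reset so a wrapped ring isn't re-read */
	*tail = (*tail + 1) % len;
	return c;
}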
1649
1829b086 1650static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1651{
6b7c5b94 1652 u32 order = get_order(size);
1829b086 1653
6b7c5b94 1654 if (order > 0)
1829b086
ED
1655 gfp |= __GFP_COMP;
1656 return alloc_pages(gfp, order);
6b7c5b94
SP
1657}
1658
1659/*
 1660 * Allocate a page, split it into fragments of size rx_frag_size and post
 1661 * them as receive buffers to BE
1662 */
1829b086 1663static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1664{
3abcdeda 1665 struct be_adapter *adapter = rxo->adapter;
26d92f92 1666 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1667 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1668 struct page *pagep = NULL;
1669 struct be_eth_rx_d *rxd;
1670 u64 page_dmaaddr = 0, frag_dmaaddr;
1671 u32 posted, page_offset = 0;
1672
3abcdeda 1673 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1674 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1675 if (!pagep) {
1829b086 1676 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1677 if (unlikely(!pagep)) {
ac124ff9 1678 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1679 break;
1680 }
2b7bcebf
IV
1681 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1682 0, adapter->big_page_size,
1683 DMA_FROM_DEVICE);
6b7c5b94
SP
1684 page_info->page_offset = 0;
1685 } else {
1686 get_page(pagep);
1687 page_info->page_offset = page_offset + rx_frag_size;
1688 }
1689 page_offset = page_info->page_offset;
1690 page_info->page = pagep;
fac6da5b 1691 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1692 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1693
1694 rxd = queue_head_node(rxq);
1695 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1696 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1697
1698 /* Any space left in the current big page for another frag? */
1699 if ((page_offset + rx_frag_size + rx_frag_size) >
1700 adapter->big_page_size) {
1701 pagep = NULL;
1702 page_info->last_page_user = true;
1703 }
26d92f92
SP
1704
1705 prev_page_info = page_info;
1706 queue_head_inc(rxq);
10ef9ab4 1707 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1708 }
1709 if (pagep)
26d92f92 1710 prev_page_info->last_page_user = true;
6b7c5b94
SP
1711
1712 if (posted) {
6b7c5b94 1713 atomic_add(posted, &rxq->used);
8788fdc2 1714 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1715 } else if (atomic_read(&rxq->used) == 0) {
1716 /* Let be_worker replenish when memory is available */
3abcdeda 1717 rxo->rx_post_starved = true;
6b7c5b94 1718 }
6b7c5b94
SP
1719}
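
The fragment maths in be_post_rx_frags() is easiest to see with numbers. A worked sketch, assuming a 4 KB PAGE_SIZE and the default rx_frag_size of 2048 (big_page_size itself is computed in be_rx_cqs_create() further below):

/* big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 * with rx_frag_size == 2048 on 4 KB pages, get_order(2048) == 0,
 * so big_page_size == 4096 and each page is split into two frags.
 */
static inline u32 frags_per_big_page(u32 frag_size)
{
	u32 big_page = (1 << get_order(frag_size)) * PAGE_SIZE;

	return big_page / frag_size;	/* 4096 / 2048 == 2 in this example */
}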
1720
5fb379ee 1721static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1722{
6b7c5b94
SP
1723 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1724
1725 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1726 return NULL;
1727
f3eb62d2 1728 rmb();
6b7c5b94
SP
1729 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1730
1731 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1732
1733 queue_tail_inc(tx_cq);
1734 return txcp;
1735}
1736
3c8def97
SP
1737static u16 be_tx_compl_process(struct be_adapter *adapter,
1738 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1739{
3c8def97 1740 struct be_queue_info *txq = &txo->q;
a73b796e 1741 struct be_eth_wrb *wrb;
3c8def97 1742 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1743 struct sk_buff *sent_skb;
ec43b1a6
SP
1744 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1745 bool unmap_skb_hdr = true;
6b7c5b94 1746
ec43b1a6 1747 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1748 BUG_ON(!sent_skb);
ec43b1a6
SP
1749 sent_skbs[txq->tail] = NULL;
1750
1751 /* skip header wrb */
a73b796e 1752 queue_tail_inc(txq);
6b7c5b94 1753
ec43b1a6 1754 do {
6b7c5b94 1755 cur_index = txq->tail;
a73b796e 1756 wrb = queue_tail_node(txq);
2b7bcebf
IV
1757 unmap_tx_frag(&adapter->pdev->dev, wrb,
1758 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1759 unmap_skb_hdr = false;
1760
6b7c5b94
SP
1761 num_wrbs++;
1762 queue_tail_inc(txq);
ec43b1a6 1763 } while (cur_index != last_index);
6b7c5b94 1764
6b7c5b94 1765 kfree_skb(sent_skb);
4d586b82 1766 return num_wrbs;
6b7c5b94
SP
1767}
1768
10ef9ab4
SP
1769/* Return the number of events in the event queue */
1770static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1771{
10ef9ab4
SP
1772 struct be_eq_entry *eqe;
1773 int num = 0;
859b1e4e 1774
10ef9ab4
SP
1775 do {
1776 eqe = queue_tail_node(&eqo->q);
1777 if (eqe->evt == 0)
1778 break;
859b1e4e 1779
10ef9ab4
SP
1780 rmb();
1781 eqe->evt = 0;
1782 num++;
1783 queue_tail_inc(&eqo->q);
1784 } while (true);
1785
1786 return num;
859b1e4e
SP
1787}
1788
10ef9ab4
SP
 1789/* Leaves the EQ in a disarmed state */
1790static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1791{
10ef9ab4 1792 int num = events_get(eqo);
859b1e4e 1793
10ef9ab4 1794 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1795}
1796
10ef9ab4 1797static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1798{
1799 struct be_rx_page_info *page_info;
3abcdeda
SP
1800 struct be_queue_info *rxq = &rxo->q;
1801 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1802 struct be_rx_compl_info *rxcp;
d23e946c
SP
1803 struct be_adapter *adapter = rxo->adapter;
1804 int flush_wait = 0;
6b7c5b94
SP
1805 u16 tail;
1806
d23e946c
SP
1807 /* Consume pending rx completions.
1808 * Wait for the flush completion (identified by zero num_rcvd)
 1809	 * to arrive. Notify the CQ even when there are no more CQ entries
 1810	 * so that HW can flush partially coalesced CQ entries.
1811 * In Lancer, there is no need to wait for flush compl.
1812 */
1813 for (;;) {
1814 rxcp = be_rx_compl_get(rxo);
1815 if (rxcp == NULL) {
1816 if (lancer_chip(adapter))
1817 break;
1818
1819 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1820 dev_warn(&adapter->pdev->dev,
1821 "did not receive flush compl\n");
1822 break;
1823 }
1824 be_cq_notify(adapter, rx_cq->id, true, 0);
1825 mdelay(1);
1826 } else {
1827 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1828 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1829 if (rxcp->num_rcvd == 0)
1830 break;
1831 }
6b7c5b94
SP
1832 }
1833
d23e946c
SP
 1834	/* After cleanup, leave the CQ in an unarmed state */
1835 be_cq_notify(adapter, rx_cq->id, false, 0);
1836
1837 /* Then free posted rx buffers that were not used */
6b7c5b94 1838 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1839 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1840 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1841 put_page(page_info->page);
1842 memset(page_info, 0, sizeof(*page_info));
1843 }
1844 BUG_ON(atomic_read(&rxq->used));
482c9e79 1845 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1846}
1847
0ae57bb3 1848static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1849{
0ae57bb3
SP
1850 struct be_tx_obj *txo;
1851 struct be_queue_info *txq;
a8e9179a 1852 struct be_eth_tx_compl *txcp;
4d586b82 1853 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1854 struct sk_buff *sent_skb;
1855 bool dummy_wrb;
0ae57bb3 1856 int i, pending_txqs;
a8e9179a
SP
1857
1858 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1859 do {
0ae57bb3
SP
1860 pending_txqs = adapter->num_tx_qs;
1861
1862 for_all_tx_queues(adapter, txo, i) {
1863 txq = &txo->q;
1864 while ((txcp = be_tx_compl_get(&txo->cq))) {
1865 end_idx =
1866 AMAP_GET_BITS(struct amap_eth_tx_compl,
1867 wrb_index, txcp);
1868 num_wrbs += be_tx_compl_process(adapter, txo,
1869 end_idx);
1870 cmpl++;
1871 }
1872 if (cmpl) {
1873 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1874 atomic_sub(num_wrbs, &txq->used);
1875 cmpl = 0;
1876 num_wrbs = 0;
1877 }
1878 if (atomic_read(&txq->used) == 0)
1879 pending_txqs--;
a8e9179a
SP
1880 }
1881
0ae57bb3 1882 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1883 break;
1884
1885 mdelay(1);
1886 } while (true);
1887
0ae57bb3
SP
1888 for_all_tx_queues(adapter, txo, i) {
1889 txq = &txo->q;
1890 if (atomic_read(&txq->used))
1891 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1892 atomic_read(&txq->used));
1893
1894 /* free posted tx for which compls will never arrive */
1895 while (atomic_read(&txq->used)) {
1896 sent_skb = txo->sent_skb_list[txq->tail];
1897 end_idx = txq->tail;
1898 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1899 &dummy_wrb);
1900 index_adv(&end_idx, num_wrbs - 1, txq->len);
1901 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1902 atomic_sub(num_wrbs, &txq->used);
1903 }
b03388d6 1904 }
6b7c5b94
SP
1905}
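
The cleanup above is a bounded-wait poll: reap completions for at most ~200 iterations of mdelay(1), then forcibly free whatever is still posted. A stripped-down sketch of the idiom (the predicate and helper name are hypothetical):

/* Poll until done(arg) holds or roughly max_ms milliseconds pass;
 * returns false on timeout so the caller can clean up by force.
 */
static bool poll_with_timeout(bool (*done)(void *), void *arg, int max_ms)
{
	int timeo = 0;

	while (!done(arg)) {
		if (++timeo > max_ms)
			return false;
		mdelay(1);	/* busy-wait is acceptable in teardown paths */
	}
	return true;
}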
1906
10ef9ab4
SP
1907static void be_evt_queues_destroy(struct be_adapter *adapter)
1908{
1909 struct be_eq_obj *eqo;
1910 int i;
1911
1912 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1913 if (eqo->q.created) {
1914 be_eq_clean(eqo);
10ef9ab4 1915 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 1916 netif_napi_del(&eqo->napi);
19d59aa7 1917 }
10ef9ab4
SP
1918 be_queue_free(adapter, &eqo->q);
1919 }
1920}
1921
1922static int be_evt_queues_create(struct be_adapter *adapter)
1923{
1924 struct be_queue_info *eq;
1925 struct be_eq_obj *eqo;
1926 int i, rc;
1927
92bf14ab
SP
1928 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
1929 adapter->cfg_num_qs);
10ef9ab4
SP
1930
1931 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
1932 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
1933 BE_NAPI_WEIGHT);
10ef9ab4
SP
1934 eqo->adapter = adapter;
1935 eqo->tx_budget = BE_TX_BUDGET;
1936 eqo->idx = i;
1937 eqo->max_eqd = BE_MAX_EQD;
1938 eqo->enable_aic = true;
1939
1940 eq = &eqo->q;
1941 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1942 sizeof(struct be_eq_entry));
1943 if (rc)
1944 return rc;
1945
f2f781a7 1946 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
1947 if (rc)
1948 return rc;
1949 }
1cfafab9 1950 return 0;
10ef9ab4
SP
1951}
1952
5fb379ee
SP
1953static void be_mcc_queues_destroy(struct be_adapter *adapter)
1954{
1955 struct be_queue_info *q;
5fb379ee 1956
8788fdc2 1957 q = &adapter->mcc_obj.q;
5fb379ee 1958 if (q->created)
8788fdc2 1959 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1960 be_queue_free(adapter, q);
1961
8788fdc2 1962 q = &adapter->mcc_obj.cq;
5fb379ee 1963 if (q->created)
8788fdc2 1964 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1965 be_queue_free(adapter, q);
1966}
1967
1968/* Must be called only after TX qs are created as MCC shares TX EQ */
1969static int be_mcc_queues_create(struct be_adapter *adapter)
1970{
1971 struct be_queue_info *q, *cq;
5fb379ee 1972
8788fdc2 1973 cq = &adapter->mcc_obj.cq;
5fb379ee 1974 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1975 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1976 goto err;
1977
10ef9ab4
SP
1978 /* Use the default EQ for MCC completions */
1979 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1980 goto mcc_cq_free;
1981
8788fdc2 1982 q = &adapter->mcc_obj.q;
5fb379ee
SP
1983 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1984 goto mcc_cq_destroy;
1985
8788fdc2 1986 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1987 goto mcc_q_free;
1988
1989 return 0;
1990
1991mcc_q_free:
1992 be_queue_free(adapter, q);
1993mcc_cq_destroy:
8788fdc2 1994 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1995mcc_cq_free:
1996 be_queue_free(adapter, cq);
1997err:
1998 return -1;
1999}
2000
6b7c5b94
SP
2001static void be_tx_queues_destroy(struct be_adapter *adapter)
2002{
2003 struct be_queue_info *q;
3c8def97
SP
2004 struct be_tx_obj *txo;
2005 u8 i;
6b7c5b94 2006
3c8def97
SP
2007 for_all_tx_queues(adapter, txo, i) {
2008 q = &txo->q;
2009 if (q->created)
2010 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2011 be_queue_free(adapter, q);
6b7c5b94 2012
3c8def97
SP
2013 q = &txo->cq;
2014 if (q->created)
2015 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2016 be_queue_free(adapter, q);
2017 }
6b7c5b94
SP
2018}
2019
7707133c 2020static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2021{
10ef9ab4 2022 struct be_queue_info *cq, *eq;
3c8def97 2023 struct be_tx_obj *txo;
92bf14ab 2024 int status, i;
6b7c5b94 2025
92bf14ab 2026 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2027
10ef9ab4
SP
2028 for_all_tx_queues(adapter, txo, i) {
2029 cq = &txo->cq;
2030 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2031 sizeof(struct be_eth_tx_compl));
2032 if (status)
2033 return status;
3c8def97 2034
10ef9ab4
SP
2035 /* If num_evt_qs is less than num_tx_qs, then more than
 2036		 * one TXQ shares an EQ
2037 */
2038 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2039 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2040 if (status)
2041 return status;
6b7c5b94 2042
10ef9ab4
SP
2043 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2044 sizeof(struct be_eth_wrb));
2045 if (status)
2046 return status;
6b7c5b94 2047
94d73aaa 2048 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2049 if (status)
2050 return status;
3c8def97 2051 }
6b7c5b94 2052
d379142b
SP
2053 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2054 adapter->num_tx_qs);
10ef9ab4 2055 return 0;
6b7c5b94
SP
2056}
2057
10ef9ab4 2058static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2059{
2060 struct be_queue_info *q;
3abcdeda
SP
2061 struct be_rx_obj *rxo;
2062 int i;
2063
2064 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2065 q = &rxo->cq;
2066 if (q->created)
2067 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2068 be_queue_free(adapter, q);
ac6a0c4a
SP
2069 }
2070}
2071
10ef9ab4 2072static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2073{
10ef9ab4 2074 struct be_queue_info *eq, *cq;
3abcdeda
SP
2075 struct be_rx_obj *rxo;
2076 int rc, i;
6b7c5b94 2077
92bf14ab
SP
2078 /* We can create as many RSS rings as there are EQs. */
2079 adapter->num_rx_qs = adapter->num_evt_qs;
2080
 2081	/* We'll use RSS only if at least 2 RSS rings are supported.
2082 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2083 */
92bf14ab
SP
2084 if (adapter->num_rx_qs > 1)
2085 adapter->num_rx_qs++;
2086
6b7c5b94 2087 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2088 for_all_rx_queues(adapter, rxo, i) {
2089 rxo->adapter = adapter;
3abcdeda
SP
2090 cq = &rxo->cq;
2091 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2092 sizeof(struct be_eth_rx_compl));
2093 if (rc)
10ef9ab4 2094 return rc;
3abcdeda 2095
10ef9ab4
SP
2096 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2097 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2098 if (rc)
10ef9ab4 2099 return rc;
3abcdeda 2100 }
6b7c5b94 2101
d379142b
SP
2102 dev_info(&adapter->pdev->dev,
2103 "created %d RSS queue(s) and 1 default RX queue\n",
2104 adapter->num_rx_qs - 1);
10ef9ab4 2105 return 0;
b628bde2
SP
2106}
2107
6b7c5b94
SP
2108static irqreturn_t be_intx(int irq, void *dev)
2109{
e49cc34f
SP
2110 struct be_eq_obj *eqo = dev;
2111 struct be_adapter *adapter = eqo->adapter;
2112 int num_evts = 0;
6b7c5b94 2113
d0b9cec3
SP
2114 /* IRQ is not expected when NAPI is scheduled as the EQ
2115 * will not be armed.
 2116	 * But this can happen on Lancer INTx, where it takes
 2117	 * a while to de-assert INTx, or in BE2, where occasionally
2118 * an interrupt may be raised even when EQ is unarmed.
2119 * If NAPI is already scheduled, then counting & notifying
2120 * events will orphan them.
e49cc34f 2121 */
d0b9cec3 2122 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2123 num_evts = events_get(eqo);
d0b9cec3
SP
2124 __napi_schedule(&eqo->napi);
2125 if (num_evts)
2126 eqo->spurious_intr = 0;
2127 }
2128 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2129
d0b9cec3
SP
 2130	/* Return IRQ_HANDLED only for the first spurious intr
2131 * after a valid intr to stop the kernel from branding
2132 * this irq as a bad one!
e49cc34f 2133 */
d0b9cec3
SP
2134 if (num_evts || eqo->spurious_intr++ == 0)
2135 return IRQ_HANDLED;
2136 else
2137 return IRQ_NONE;
6b7c5b94
SP
2138}
2139
10ef9ab4 2140static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2141{
10ef9ab4 2142 struct be_eq_obj *eqo = dev;
6b7c5b94 2143
0b545a62
SP
2144 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2145 napi_schedule(&eqo->napi);
6b7c5b94
SP
2146 return IRQ_HANDLED;
2147}
2148
2e588f84 2149static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2150{
e38b1706 2151	return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
6b7c5b94
SP
2152}
2153
10ef9ab4
SP
2154static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2155 int budget)
6b7c5b94 2156{
3abcdeda
SP
2157 struct be_adapter *adapter = rxo->adapter;
2158 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2159 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2160 u32 work_done;
2161
2162 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2163 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2164 if (!rxcp)
2165 break;
2166
12004ae9
SP
 2167		/* Is it a flush compl that has no data? */
2168 if (unlikely(rxcp->num_rcvd == 0))
2169 goto loop_continue;
2170
2171 /* Discard compl with partial DMA Lancer B0 */
2172 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2173 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2174 goto loop_continue;
2175 }
2176
 2177		/* On BE, drop pkts that arrive due to imperfect filtering in
 2178		 * promiscuous mode on some SKUs
2179 */
2180 if (unlikely(rxcp->port != adapter->port_num &&
2181 !lancer_chip(adapter))) {
10ef9ab4 2182 be_rx_compl_discard(rxo, rxcp);
12004ae9 2183 goto loop_continue;
64642811 2184 }
009dd872 2185
12004ae9 2186 if (do_gro(rxcp))
10ef9ab4 2187 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2188 else
10ef9ab4 2189 be_rx_compl_process(rxo, rxcp);
12004ae9 2190loop_continue:
2e588f84 2191 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2192 }
2193
10ef9ab4
SP
2194 if (work_done) {
2195 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2196
10ef9ab4
SP
2197 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2198 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2199 }
10ef9ab4 2200
6b7c5b94
SP
2201 return work_done;
2202}
2203
10ef9ab4
SP
2204static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2205 int budget, int idx)
6b7c5b94 2206{
6b7c5b94 2207 struct be_eth_tx_compl *txcp;
10ef9ab4 2208 int num_wrbs = 0, work_done;
3c8def97 2209
10ef9ab4
SP
2210 for (work_done = 0; work_done < budget; work_done++) {
2211 txcp = be_tx_compl_get(&txo->cq);
2212 if (!txcp)
2213 break;
2214 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2215 AMAP_GET_BITS(struct amap_eth_tx_compl,
2216 wrb_index, txcp));
10ef9ab4 2217 }
6b7c5b94 2218
10ef9ab4
SP
2219 if (work_done) {
2220 be_cq_notify(adapter, txo->cq.id, true, work_done);
2221 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2222
10ef9ab4
SP
2223 /* As Tx wrbs have been freed up, wake up netdev queue
2224 * if it was stopped due to lack of tx wrbs. */
2225 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2226 atomic_read(&txo->q.used) < txo->q.len / 2) {
2227 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2228 }
10ef9ab4
SP
2229
2230 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2231 tx_stats(txo)->tx_compl += work_done;
2232 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2233 }
10ef9ab4
SP
2234 return (work_done < budget); /* Done */
2235}
6b7c5b94 2236
68d7bdcb 2237int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2238{
2239 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2240 struct be_adapter *adapter = eqo->adapter;
0b545a62 2241 int max_work = 0, work, i, num_evts;
10ef9ab4 2242 bool tx_done;
f31e50a8 2243
0b545a62
SP
2244 num_evts = events_get(eqo);
2245
10ef9ab4
SP
2246 /* Process all TXQs serviced by this EQ */
2247 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2248 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2249 eqo->tx_budget, i);
2250 if (!tx_done)
2251 max_work = budget;
f31e50a8
SP
2252 }
2253
10ef9ab4
SP
2254 /* This loop will iterate twice for EQ0 in which
 2255	 * completions of the last RXQ (default one) are also processed.
 2256	 * For other EQs the loop iterates only once.
2257 */
2258 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2259 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2260 max_work = max(work, max_work);
2261 }
6b7c5b94 2262
10ef9ab4
SP
2263 if (is_mcc_eqo(eqo))
2264 be_process_mcc(adapter);
93c86700 2265
10ef9ab4
SP
2266 if (max_work < budget) {
2267 napi_complete(napi);
0b545a62 2268 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2269 } else {
2270 /* As we'll continue in polling mode, count and clear events */
0b545a62 2271 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2272 }
10ef9ab4 2273 return max_work;
6b7c5b94
SP
2274}
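
The `i += adapter->num_evt_qs` striding above distributes queues over EQs round-robin, so EQ i services queues i, i + num_evt_qs, i + 2 * num_evt_qs, and so on. With 4 EQs and 5 RX queues (4 RSS rings plus the default RXQ), EQ0 services RXQ0 and RXQ4, which is why the comment says the RX loop runs twice for EQ0. The inverse mapping, purely for illustration:

/* Which EQ services queue q when the queues are spread over neq EQs? */
static inline int eq_idx_for_queue(int q, int neq)
{
	return q % neq;	/* inverse of "for (i = eqo->idx; ...; i += neq)" */
}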
2275
f67ef7ba 2276void be_detect_error(struct be_adapter *adapter)
7c185276 2277{
e1cfb67a
PR
2278 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2279 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2280 u32 i;
2281
d23e946c 2282 if (be_hw_error(adapter))
72f02485
SP
2283 return;
2284
e1cfb67a
PR
2285 if (lancer_chip(adapter)) {
2286 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2287 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2288 sliport_err1 = ioread32(adapter->db +
2289 SLIPORT_ERROR1_OFFSET);
2290 sliport_err2 = ioread32(adapter->db +
2291 SLIPORT_ERROR2_OFFSET);
2292 }
2293 } else {
2294 pci_read_config_dword(adapter->pdev,
2295 PCICFG_UE_STATUS_LOW, &ue_lo);
2296 pci_read_config_dword(adapter->pdev,
2297 PCICFG_UE_STATUS_HIGH, &ue_hi);
2298 pci_read_config_dword(adapter->pdev,
2299 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2300 pci_read_config_dword(adapter->pdev,
2301 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2302
f67ef7ba
PR
2303 ue_lo = (ue_lo & ~ue_lo_mask);
2304 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2305 }
7c185276 2306
1451ae6e
AK
2307 /* On certain platforms BE hardware can indicate spurious UEs.
2308 * Allow the h/w to stop working completely in case of a real UE.
 2309	 * Hence hw_error is not set on UE detection.
2310 */
2311 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2312 adapter->hw_error = true;
434b3648 2313 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2314 "Error detected in the card\n");
2315 }
2316
2317 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2318 dev_err(&adapter->pdev->dev,
2319 "ERR: sliport status 0x%x\n", sliport_status);
2320 dev_err(&adapter->pdev->dev,
2321 "ERR: sliport error1 0x%x\n", sliport_err1);
2322 dev_err(&adapter->pdev->dev,
2323 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2324 }
2325
e1cfb67a
PR
2326 if (ue_lo) {
2327 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2328 if (ue_lo & 1)
7c185276
AK
2329 dev_err(&adapter->pdev->dev,
2330 "UE: %s bit set\n", ue_status_low_desc[i]);
2331 }
2332 }
f67ef7ba 2333
e1cfb67a
PR
2334 if (ue_hi) {
2335 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2336 if (ue_hi & 1)
7c185276
AK
2337 dev_err(&adapter->pdev->dev,
2338 "UE: %s bit set\n", ue_status_hi_desc[i]);
2339 }
2340 }
2341
2342}
2343
8d56ff11
SP
2344static void be_msix_disable(struct be_adapter *adapter)
2345{
ac6a0c4a 2346 if (msix_enabled(adapter)) {
8d56ff11 2347 pci_disable_msix(adapter->pdev);
ac6a0c4a 2348 adapter->num_msix_vec = 0;
68d7bdcb 2349 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2350 }
2351}
2352
c2bba3df 2353static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2354{
92bf14ab 2355 int i, status, num_vec;
d379142b 2356 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2357
92bf14ab
SP
2358 /* If RoCE is supported, program the max number of NIC vectors that
2359 * may be configured via set-channels, along with vectors needed for
 2360	 * RoCE. Else, just program the number we'll use initially.
2361 */
2362 if (be_roce_supported(adapter))
2363 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2364 2 * num_online_cpus());
2365 else
2366 num_vec = adapter->cfg_num_qs;
3abcdeda 2367
ac6a0c4a 2368 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2369 adapter->msix_entries[i].entry = i;
2370
ac6a0c4a 2371 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2372 if (status == 0) {
2373 goto done;
92bf14ab 2374 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2375 num_vec = status;
c2bba3df
SK
2376 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2377 num_vec);
2378 if (!status)
3abcdeda 2379 goto done;
3abcdeda 2380 }
d379142b
SP
2381
2382 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2383
c2bba3df
SK
2384 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2385 if (!be_physfn(adapter))
2386 return status;
2387 return 0;
3abcdeda 2388done:
92bf14ab
SP
2389 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2390 adapter->num_msix_roce_vec = num_vec / 2;
2391 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2392 adapter->num_msix_roce_vec);
2393 }
2394
2395 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2396
2397 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2398 adapter->num_msix_vec);
c2bba3df 2399 return 0;
6b7c5b94
SP
2400}
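
be_msix_enable() depends on the legacy pci_enable_msix() contract of this era: 0 means success, a negative value is an error, and a positive value is how many vectors the platform can actually grant, inviting one retry with the smaller count. A minimal sketch of that retry idiom (the wrapper name is made up):

static int enable_msix_with_retry(struct pci_dev *pdev,
				  struct msix_entry *entries, int want)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0)	/* only 'ret' vectors available: retry with fewer */
		ret = pci_enable_msix(pdev, entries, ret);
	return ret;	/* 0 on success, negative errno on failure */
}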
2401
fe6d2a38 2402static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2403 struct be_eq_obj *eqo)
b628bde2 2404{
f2f781a7 2405 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2406}
6b7c5b94 2407
b628bde2
SP
2408static int be_msix_register(struct be_adapter *adapter)
2409{
10ef9ab4
SP
2410 struct net_device *netdev = adapter->netdev;
2411 struct be_eq_obj *eqo;
2412 int status, i, vec;
6b7c5b94 2413
10ef9ab4
SP
2414 for_all_evt_queues(adapter, eqo, i) {
2415 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2416 vec = be_msix_vec_get(adapter, eqo);
2417 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2418 if (status)
2419 goto err_msix;
2420 }
b628bde2 2421
6b7c5b94 2422 return 0;
3abcdeda 2423err_msix:
10ef9ab4
SP
2424 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2425 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2426 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2427 status);
ac6a0c4a 2428 be_msix_disable(adapter);
6b7c5b94
SP
2429 return status;
2430}
2431
2432static int be_irq_register(struct be_adapter *adapter)
2433{
2434 struct net_device *netdev = adapter->netdev;
2435 int status;
2436
ac6a0c4a 2437 if (msix_enabled(adapter)) {
6b7c5b94
SP
2438 status = be_msix_register(adapter);
2439 if (status == 0)
2440 goto done;
ba343c77
SB
2441 /* INTx is not supported for VF */
2442 if (!be_physfn(adapter))
2443 return status;
6b7c5b94
SP
2444 }
2445
e49cc34f 2446 /* INTx: only the first EQ is used */
6b7c5b94
SP
2447 netdev->irq = adapter->pdev->irq;
2448 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2449 &adapter->eq_obj[0]);
6b7c5b94
SP
2450 if (status) {
2451 dev_err(&adapter->pdev->dev,
2452 "INTx request IRQ failed - err %d\n", status);
2453 return status;
2454 }
2455done:
2456 adapter->isr_registered = true;
2457 return 0;
2458}
2459
2460static void be_irq_unregister(struct be_adapter *adapter)
2461{
2462 struct net_device *netdev = adapter->netdev;
10ef9ab4 2463 struct be_eq_obj *eqo;
3abcdeda 2464 int i;
6b7c5b94
SP
2465
2466 if (!adapter->isr_registered)
2467 return;
2468
2469 /* INTx */
ac6a0c4a 2470 if (!msix_enabled(adapter)) {
e49cc34f 2471 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2472 goto done;
2473 }
2474
2475 /* MSIx */
10ef9ab4
SP
2476 for_all_evt_queues(adapter, eqo, i)
2477 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2478
6b7c5b94
SP
2479done:
2480 adapter->isr_registered = false;
6b7c5b94
SP
2481}
2482
10ef9ab4 2483static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2484{
2485 struct be_queue_info *q;
2486 struct be_rx_obj *rxo;
2487 int i;
2488
2489 for_all_rx_queues(adapter, rxo, i) {
2490 q = &rxo->q;
2491 if (q->created) {
2492 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2493 be_rx_cq_clean(rxo);
482c9e79 2494 }
10ef9ab4 2495 be_queue_free(adapter, q);
482c9e79
SP
2496 }
2497}
2498
889cd4b2
SP
2499static int be_close(struct net_device *netdev)
2500{
2501 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2502 struct be_eq_obj *eqo;
2503 int i;
889cd4b2 2504
045508a8
PP
2505 be_roce_dev_close(adapter);
2506
04d3d624
SK
2507 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2508 for_all_evt_queues(adapter, eqo, i)
2509 napi_disable(&eqo->napi);
2510 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2511 }
a323d9bf
SP
2512
2513 be_async_mcc_disable(adapter);
2514
2515 /* Wait for all pending tx completions to arrive so that
2516 * all tx skbs are freed.
2517 */
fba87559 2518 netif_tx_disable(netdev);
6e1f9975 2519 be_tx_compl_clean(adapter);
a323d9bf
SP
2520
2521 be_rx_qs_destroy(adapter);
2522
2523 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2524 if (msix_enabled(adapter))
2525 synchronize_irq(be_msix_vec_get(adapter, eqo));
2526 else
2527 synchronize_irq(netdev->irq);
2528 be_eq_clean(eqo);
63fcb27f
PR
2529 }
2530
889cd4b2
SP
2531 be_irq_unregister(adapter);
2532
482c9e79
SP
2533 return 0;
2534}
2535
10ef9ab4 2536static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2537{
2538 struct be_rx_obj *rxo;
e9008ee9
PR
2539 int rc, i, j;
2540 u8 rsstable[128];
482c9e79
SP
2541
2542 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2543 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2544 sizeof(struct be_eth_rx_d));
2545 if (rc)
2546 return rc;
2547 }
2548
2549 /* The FW would like the default RXQ to be created first */
2550 rxo = default_rxo(adapter);
2551 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2552 adapter->if_handle, false, &rxo->rss_id);
2553 if (rc)
2554 return rc;
2555
2556 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2557 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2558 rx_frag_size, adapter->if_handle,
2559 true, &rxo->rss_id);
482c9e79
SP
2560 if (rc)
2561 return rc;
2562 }
2563
2564 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2565 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2566 for_all_rss_queues(adapter, rxo, i) {
2567 if ((j + i) >= 128)
2568 break;
2569 rsstable[j + i] = rxo->rss_id;
2570 }
2571 }
594ad54a
SR
2572 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2573 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2574
2575 if (!BEx_chip(adapter))
2576 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2577 RSS_ENABLE_UDP_IPV6;
2578
2579 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2580 128);
2581 if (rc) {
2582 adapter->rss_flags = 0;
482c9e79 2583 return rc;
594ad54a 2584 }
482c9e79
SP
2585 }
2586
2587 /* First time posting */
10ef9ab4 2588 for_all_rx_queues(adapter, rxo, i)
482c9e79 2589 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2590 return 0;
2591}
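
The 128-entry rsstable built above is an RSS indirection table filled round-robin over the RSS rings: entry n holds the rss_id of ring n % (num_rx_qs - 1), so with rings A, B, C, D it reads A B C D A B C D ... and hashed flows spread evenly across the rings. The fill reduced to its core, for illustration only:

/* Fill a 128-entry RSS indirection table round-robin over ring ids */
static void fill_rss_table(u8 table[128], const u8 *ring_ids, int nrings)
{
	int n;

	for (n = 0; n < 128; n++)
		table[n] = ring_ids[n % nrings];
}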
2592
6b7c5b94
SP
2593static int be_open(struct net_device *netdev)
2594{
2595 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2596 struct be_eq_obj *eqo;
3abcdeda 2597 struct be_rx_obj *rxo;
10ef9ab4 2598 struct be_tx_obj *txo;
b236916a 2599 u8 link_status;
3abcdeda 2600 int status, i;
5fb379ee 2601
10ef9ab4 2602 status = be_rx_qs_create(adapter);
482c9e79
SP
2603 if (status)
2604 goto err;
2605
c2bba3df
SK
2606 status = be_irq_register(adapter);
2607 if (status)
2608 goto err;
5fb379ee 2609
10ef9ab4 2610 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2611 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2612
10ef9ab4
SP
2613 for_all_tx_queues(adapter, txo, i)
2614 be_cq_notify(adapter, txo->cq.id, true, 0);
2615
7a1e9b20
SP
2616 be_async_mcc_enable(adapter);
2617
10ef9ab4
SP
2618 for_all_evt_queues(adapter, eqo, i) {
2619 napi_enable(&eqo->napi);
2620 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2621 }
04d3d624 2622 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2623
323ff71e 2624 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2625 if (!status)
2626 be_link_status_update(adapter, link_status);
2627
fba87559 2628 netif_tx_start_all_queues(netdev);
045508a8 2629 be_roce_dev_open(adapter);
889cd4b2
SP
2630 return 0;
2631err:
2632 be_close(adapter->netdev);
2633 return -EIO;
5fb379ee
SP
2634}
2635
71d8d1b5
AK
2636static int be_setup_wol(struct be_adapter *adapter, bool enable)
2637{
2638 struct be_dma_mem cmd;
2639 int status = 0;
2640 u8 mac[ETH_ALEN];
2641
2642 memset(mac, 0, ETH_ALEN);
2643
2644 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2645 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2646 GFP_KERNEL);
71d8d1b5
AK
2647 if (cmd.va == NULL)
2648 return -1;
71d8d1b5
AK
2649
2650 if (enable) {
2651 status = pci_write_config_dword(adapter->pdev,
2652 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2653 if (status) {
2654 dev_err(&adapter->pdev->dev,
2381a55c 2655 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2656 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2657 cmd.dma);
71d8d1b5
AK
2658 return status;
2659 }
2660 status = be_cmd_enable_magic_wol(adapter,
2661 adapter->netdev->dev_addr, &cmd);
2662 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2663 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2664 } else {
2665 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2666 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2667 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2668 }
2669
2b7bcebf 2670 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2671 return status;
2672}
2673
6d87f5c3
AK
2674/*
 2675 * Generate a seed MAC address from the PF MAC address using jhash.
 2676 * MAC addresses for VFs are assigned incrementally starting from the seed.
2677 * These addresses are programmed in the ASIC by the PF and the VF driver
2678 * queries for the MAC address during its probe.
2679 */
4c876616 2680static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2681{
f9449ab7 2682 u32 vf;
3abcdeda 2683 int status = 0;
6d87f5c3 2684 u8 mac[ETH_ALEN];
11ac75ed 2685 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2686
2687 be_vf_eth_addr_generate(adapter, mac);
2688
11ac75ed 2689 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2690 if (BEx_chip(adapter))
590c391d 2691 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2692 vf_cfg->if_handle,
2693 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2694 else
2695 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2696 vf + 1);
590c391d 2697
6d87f5c3
AK
2698 if (status)
2699 dev_err(&adapter->pdev->dev,
590c391d 2700 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2701 else
11ac75ed 2702 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2703
2704 mac[5] += 1;
2705 }
2706 return status;
2707}
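
Concretely, only the last octet is bumped per VF: if the jhash seed came out as, say, 02:00:00:0a:0b:00 (a made-up value), VF0 gets ...:00, VF1 gets ...:01, and so on; note there is no carry into mac[4]. A toy sketch of the assignment loop:

static void assign_vf_macs(u8 seed[ETH_ALEN], int nvfs)
{
	int vf;

	for (vf = 0; vf < nvfs; vf++) {
		/* ... program 'seed' as VF vf's MAC here ... */
		seed[5] += 1;	/* next VF: ...:01, ...:02 (no carry) */
	}
}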
2708
4c876616
SP
2709static int be_vfs_mac_query(struct be_adapter *adapter)
2710{
2711 int status, vf;
2712 u8 mac[ETH_ALEN];
2713 struct be_vf_cfg *vf_cfg;
95046b92 2714 bool active = false;
4c876616
SP
2715
2716 for_all_vfs(adapter, vf_cfg, vf) {
2717 be_cmd_get_mac_from_list(adapter, mac, &active,
2718 &vf_cfg->pmac_id, 0);
2719
2720 status = be_cmd_mac_addr_query(adapter, mac, false,
2721 vf_cfg->if_handle, 0);
2722 if (status)
2723 return status;
2724 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2725 }
2726 return 0;
2727}
2728
f9449ab7 2729static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2730{
11ac75ed 2731 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2732 u32 vf;
2733
257a3feb 2734 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2735 dev_warn(&adapter->pdev->dev,
2736 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2737 goto done;
2738 }
2739
b4c1df93
SP
2740 pci_disable_sriov(adapter->pdev);
2741
11ac75ed 2742 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2743 if (BEx_chip(adapter))
11ac75ed
SP
2744 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2745 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2746 else
2747 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2748 vf + 1);
f9449ab7 2749
11ac75ed
SP
2750 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2751 }
39f1d94d
SP
2752done:
2753 kfree(adapter->vf_cfg);
2754 adapter->num_vfs = 0;
6d87f5c3
AK
2755}
2756
7707133c
SP
2757static void be_clear_queues(struct be_adapter *adapter)
2758{
2759 be_mcc_queues_destroy(adapter);
2760 be_rx_cqs_destroy(adapter);
2761 be_tx_queues_destroy(adapter);
2762 be_evt_queues_destroy(adapter);
2763}
2764
68d7bdcb 2765static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2766{
191eb756
SP
2767 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2768 cancel_delayed_work_sync(&adapter->work);
2769 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2770 }
68d7bdcb
SP
2771}
2772
2773static int be_clear(struct be_adapter *adapter)
2774{
2775 int i;
2776
2777 be_cancel_worker(adapter);
191eb756 2778
11ac75ed 2779 if (sriov_enabled(adapter))
f9449ab7
SP
2780 be_vf_clear(adapter);
2781
2d17f403
SP
2782 /* delete the primary mac along with the uc-mac list */
2783 for (i = 0; i < (adapter->uc_macs + 1); i++)
fbc13f01 2784 be_cmd_pmac_del(adapter, adapter->if_handle,
2d17f403
SP
2785 adapter->pmac_id[i], 0);
2786 adapter->uc_macs = 0;
fbc13f01 2787
f9449ab7 2788 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 2789
7707133c 2790 be_clear_queues(adapter);
a54769f5 2791
abb93951
PR
2792 kfree(adapter->pmac_id);
2793 adapter->pmac_id = NULL;
2794
10ef9ab4 2795 be_msix_disable(adapter);
a54769f5
SP
2796 return 0;
2797}
2798
4c876616 2799static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2800{
92bf14ab 2801 struct be_resources res = {0};
4c876616
SP
2802 struct be_vf_cfg *vf_cfg;
2803 u32 cap_flags, en_flags, vf;
abb93951
PR
2804 int status;
2805
4c876616
SP
2806 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2807 BE_IF_FLAGS_MULTICAST;
abb93951 2808
4c876616 2809 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
2810 if (!BE3_chip(adapter)) {
2811 status = be_cmd_get_profile_config(adapter, &res,
2812 vf + 1);
2813 if (!status)
2814 cap_flags = res.if_cap_flags;
2815 }
4c876616
SP
2816
2817 /* If a FW profile exists, then cap_flags are updated */
2818 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2819 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2820 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2821 &vf_cfg->if_handle, vf + 1);
2822 if (status)
2823 goto err;
2824 }
2825err:
2826 return status;
abb93951
PR
2827}
2828
39f1d94d 2829static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2830{
11ac75ed 2831 struct be_vf_cfg *vf_cfg;
30128031
SP
2832 int vf;
2833
39f1d94d
SP
2834 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2835 GFP_KERNEL);
2836 if (!adapter->vf_cfg)
2837 return -ENOMEM;
2838
11ac75ed
SP
2839 for_all_vfs(adapter, vf_cfg, vf) {
2840 vf_cfg->if_handle = -1;
2841 vf_cfg->pmac_id = -1;
30128031 2842 }
39f1d94d 2843 return 0;
30128031
SP
2844}
2845
f9449ab7
SP
2846static int be_vf_setup(struct be_adapter *adapter)
2847{
11ac75ed 2848 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2849 u16 def_vlan, lnk_speed;
4c876616
SP
2850 int status, old_vfs, vf;
2851 struct device *dev = &adapter->pdev->dev;
04a06028 2852 u32 privileges;
39f1d94d 2853
257a3feb 2854 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
2855 if (old_vfs) {
2856 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2857 if (old_vfs != num_vfs)
2858 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2859 adapter->num_vfs = old_vfs;
39f1d94d 2860 } else {
92bf14ab 2861 if (num_vfs > be_max_vfs(adapter))
4c876616 2862 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
2863 be_max_vfs(adapter), num_vfs);
2864 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 2865 if (!adapter->num_vfs)
4c876616 2866 return 0;
39f1d94d
SP
2867 }
2868
2869 status = be_vf_setup_init(adapter);
2870 if (status)
2871 goto err;
30128031 2872
4c876616
SP
2873 if (old_vfs) {
2874 for_all_vfs(adapter, vf_cfg, vf) {
2875 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2876 if (status)
2877 goto err;
2878 }
2879 } else {
2880 status = be_vfs_if_create(adapter);
f9449ab7
SP
2881 if (status)
2882 goto err;
f9449ab7
SP
2883 }
2884
4c876616
SP
2885 if (old_vfs) {
2886 status = be_vfs_mac_query(adapter);
2887 if (status)
2888 goto err;
2889 } else {
39f1d94d
SP
2890 status = be_vf_eth_addr_config(adapter);
2891 if (status)
2892 goto err;
2893 }
f9449ab7 2894
11ac75ed 2895 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 2896		/* Allow VFs to program MAC/VLAN filters */
2897 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
2898 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
2899 status = be_cmd_set_fn_privileges(adapter,
2900 privileges |
2901 BE_PRIV_FILTMGMT,
2902 vf + 1);
2903 if (!status)
2904 dev_info(dev, "VF%d has FILTMGMT privilege\n",
2905 vf);
2906 }
2907
4c876616
SP
 2908		/* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2909 * Allow full available bandwidth
2910 */
2911 if (BE3_chip(adapter) && !old_vfs)
2912 be_cmd_set_qos(adapter, 1000, vf+1);
2913
2914 status = be_cmd_link_status_query(adapter, &lnk_speed,
2915 NULL, vf + 1);
2916 if (!status)
2917 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2918
2919 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2920 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2921 if (status)
2922 goto err;
2923 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2924
2925 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2926 }
b4c1df93
SP
2927
2928 if (!old_vfs) {
2929 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2930 if (status) {
2931 dev_err(dev, "SRIOV enable failed\n");
2932 adapter->num_vfs = 0;
2933 goto err;
2934 }
2935 }
f9449ab7
SP
2936 return 0;
2937err:
4c876616
SP
2938 dev_err(dev, "VF setup failed\n");
2939 be_vf_clear(adapter);
f9449ab7
SP
2940 return status;
2941}
2942
92bf14ab
SP
 2943/* On BE2/BE3, FW does not suggest the supported limits */
2944static void BEx_get_resources(struct be_adapter *adapter,
2945 struct be_resources *res)
2946{
2947 struct pci_dev *pdev = adapter->pdev;
2948 bool use_sriov = false;
2949
2950 if (BE3_chip(adapter) && be_physfn(adapter)) {
2951 int max_vfs;
2952
2953 max_vfs = pci_sriov_get_totalvfs(pdev);
2954 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
2955 use_sriov = res->max_vfs && num_vfs;
2956 }
2957
2958 if (be_physfn(adapter))
2959 res->max_uc_mac = BE_UC_PMAC_COUNT;
2960 else
2961 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
2962
2963 if (adapter->function_mode & FLEX10_MODE)
2964 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2965 else
2966 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
2967 res->max_mcast_mac = BE_MAX_MC;
2968
2969 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
2970 !be_physfn(adapter))
2971 res->max_tx_qs = 1;
2972 else
2973 res->max_tx_qs = BE3_MAX_TX_QS;
2974
2975 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2976 !use_sriov && be_physfn(adapter))
2977 res->max_rss_qs = (adapter->be3_native) ?
2978 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2979 res->max_rx_qs = res->max_rss_qs + 1;
2980
68d7bdcb 2981 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
92bf14ab
SP
2982
2983 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
2984 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
2985 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
2986}
2987
30128031
SP
2988static void be_setup_init(struct be_adapter *adapter)
2989{
2990 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2991 adapter->phy.link_speed = -1;
30128031
SP
2992 adapter->if_handle = -1;
2993 adapter->be3_native = false;
2994 adapter->promiscuous = false;
f25b119c
PR
2995 if (be_physfn(adapter))
2996 adapter->cmd_privileges = MAX_PRIVILEGES;
2997 else
2998 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2999}
3000
92bf14ab 3001static int be_get_resources(struct be_adapter *adapter)
abb93951 3002{
92bf14ab
SP
3003 struct device *dev = &adapter->pdev->dev;
3004 struct be_resources res = {0};
3005 int status;
abb93951 3006
92bf14ab
SP
3007 if (BEx_chip(adapter)) {
3008 BEx_get_resources(adapter, &res);
3009 adapter->res = res;
abb93951
PR
3010 }
3011
92bf14ab
SP
 3012	/* For BE3, only check if FW suggests a different max-txqs value */
3013 if (BE3_chip(adapter)) {
3014 status = be_cmd_get_profile_config(adapter, &res, 0);
3015 if (!status && res.max_tx_qs)
3016 adapter->res.max_tx_qs =
3017 min(adapter->res.max_tx_qs, res.max_tx_qs);
3018 }
abb93951 3019
92bf14ab
SP
 3020	/* For Lancer, SH etc. read per-function resource limits from FW.
 3021	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 3022	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3023 */
3024 if (!BEx_chip(adapter)) {
3025 status = be_cmd_get_func_config(adapter, &res);
3026 if (status)
3027 return status;
abb93951 3028
92bf14ab
SP
 3029		/* If RoCE may be enabled, stash away half the EQs for RoCE */
3030 if (be_roce_supported(adapter))
3031 res.max_evt_qs /= 2;
3032 adapter->res = res;
abb93951 3033
92bf14ab
SP
3034 if (be_physfn(adapter)) {
3035 status = be_cmd_get_profile_config(adapter, &res, 0);
3036 if (status)
3037 return status;
3038 adapter->res.max_vfs = res.max_vfs;
3039 }
abb93951 3040
92bf14ab
SP
3041 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3042 be_max_txqs(adapter), be_max_rxqs(adapter),
3043 be_max_rss(adapter), be_max_eqs(adapter),
3044 be_max_vfs(adapter));
3045 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3046 be_max_uc(adapter), be_max_mc(adapter),
3047 be_max_vlans(adapter));
abb93951 3048 }
4c876616 3049
92bf14ab 3050 return 0;
abb93951
PR
3051}
3052
39f1d94d
SP
3053/* Routine to query per function resource limits */
3054static int be_get_config(struct be_adapter *adapter)
3055{
4c876616 3056 int status;
39f1d94d 3057
abb93951
PR
3058 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3059 &adapter->function_mode,
0ad3157e
VV
3060 &adapter->function_caps,
3061 &adapter->asic_rev);
abb93951 3062 if (status)
92bf14ab 3063 return status;
abb93951 3064
92bf14ab
SP
3065 status = be_get_resources(adapter);
3066 if (status)
3067 return status;
abb93951
PR
3068
3069 /* primary mac needs 1 pmac entry */
92bf14ab
SP
3070 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3071 GFP_KERNEL);
3072 if (!adapter->pmac_id)
3073 return -ENOMEM;
abb93951 3074
92bf14ab
SP
3075 /* Sanitize cfg_num_qs based on HW and platform limits */
3076 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3077
3078 return 0;
39f1d94d
SP
3079}
3080
95046b92
SP
3081static int be_mac_setup(struct be_adapter *adapter)
3082{
3083 u8 mac[ETH_ALEN];
3084 int status;
3085
3086 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3087 status = be_cmd_get_perm_mac(adapter, mac);
3088 if (status)
3089 return status;
3090
3091 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3092 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3093 } else {
3094 /* Maybe the HW was reset; dev_addr must be re-programmed */
3095 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3096 }
3097
3098 /* On BE3 VFs this cmd may fail due to lack of privilege.
3099 * Ignore the failure as in this case pmac_id is fetched
3100 * in the IFACE_CREATE cmd.
3101 */
3102 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3103 &adapter->pmac_id[0], 0);
3104 return 0;
3105}
3106
68d7bdcb
SP
3107static void be_schedule_worker(struct be_adapter *adapter)
3108{
3109 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3110 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3111}
3112
7707133c 3113static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3114{
68d7bdcb 3115 struct net_device *netdev = adapter->netdev;
10ef9ab4 3116 int status;
ba343c77 3117
7707133c 3118 status = be_evt_queues_create(adapter);
abb93951
PR
3119 if (status)
3120 goto err;
73d540f2 3121
7707133c 3122 status = be_tx_qs_create(adapter);
c2bba3df
SK
3123 if (status)
3124 goto err;
10ef9ab4 3125
7707133c 3126 status = be_rx_cqs_create(adapter);
10ef9ab4 3127 if (status)
a54769f5 3128 goto err;
6b7c5b94 3129
7707133c 3130 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3131 if (status)
3132 goto err;
3133
68d7bdcb
SP
3134 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3135 if (status)
3136 goto err;
3137
3138 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3139 if (status)
3140 goto err;
3141
7707133c
SP
3142 return 0;
3143err:
3144 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3145 return status;
3146}
3147
68d7bdcb
SP
3148int be_update_queues(struct be_adapter *adapter)
3149{
3150 struct net_device *netdev = adapter->netdev;
3151 int status;
3152
3153 if (netif_running(netdev))
3154 be_close(netdev);
3155
3156 be_cancel_worker(adapter);
3157
3158 /* If any vectors have been shared with RoCE we cannot re-program
3159 * the MSIx table.
3160 */
3161 if (!adapter->num_msix_roce_vec)
3162 be_msix_disable(adapter);
3163
3164 be_clear_queues(adapter);
3165
3166 if (!msix_enabled(adapter)) {
3167 status = be_msix_enable(adapter);
3168 if (status)
3169 return status;
3170 }
3171
3172 status = be_setup_queues(adapter);
3173 if (status)
3174 return status;
3175
3176 be_schedule_worker(adapter);
3177
3178 if (netif_running(netdev))
3179 status = be_open(netdev);
3180
3181 return status;
3182}
3183
7707133c
SP
3184static int be_setup(struct be_adapter *adapter)
3185{
3186 struct device *dev = &adapter->pdev->dev;
3187 u32 tx_fc, rx_fc, en_flags;
3188 int status;
3189
3190 be_setup_init(adapter);
3191
3192 if (!lancer_chip(adapter))
3193 be_cmd_req_native_mode(adapter);
3194
3195 status = be_get_config(adapter);
10ef9ab4 3196 if (status)
a54769f5 3197 goto err;
6b7c5b94 3198
7707133c 3199 status = be_msix_enable(adapter);
10ef9ab4 3200 if (status)
a54769f5 3201 goto err;
6b7c5b94 3202
f9449ab7 3203 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3204 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3205 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3206 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3207 en_flags = en_flags & be_if_cap_flags(adapter);
3208 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3209 &adapter->if_handle, 0);
7707133c 3210 if (status)
a54769f5 3211 goto err;
6b7c5b94 3212
68d7bdcb
SP
3213 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3214 rtnl_lock();
7707133c 3215 status = be_setup_queues(adapter);
68d7bdcb 3216 rtnl_unlock();
95046b92 3217 if (status)
1578e777
PR
3218 goto err;
3219
7707133c
SP
3220 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
 3221	/* In UMC mode FW does not return the right privileges.
3222 * Override with correct privilege equivalent to PF.
3223 */
3224 if (be_is_mc(adapter))
3225 adapter->cmd_privileges = MAX_PRIVILEGES;
3226
3227 status = be_mac_setup(adapter);
10ef9ab4
SP
3228 if (status)
3229 goto err;
3230
eeb65ced 3231 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3232
1d1e9a46 3233 if (adapter->vlans_added)
10329df8 3234 be_vid_config(adapter);
7ab8b0b4 3235
a54769f5 3236 be_set_rx_mode(adapter->netdev);
5fb379ee 3237
ddc3f5cb 3238 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3239
ddc3f5cb
AK
3240 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3241 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3242 adapter->rx_fc);
2dc1deb6 3243
92bf14ab
SP
3244 if (be_physfn(adapter) && num_vfs) {
3245 if (be_max_vfs(adapter))
39f1d94d
SP
3246 be_vf_setup(adapter);
3247 else
3248 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3249 }
3250
f25b119c
PR
3251 status = be_cmd_get_phy_info(adapter);
3252 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3253 adapter->phy.fc_autoneg = 1;
3254
68d7bdcb 3255 be_schedule_worker(adapter);
f9449ab7 3256 return 0;
a54769f5
SP
3257err:
3258 be_clear(adapter);
3259 return status;
3260}
6b7c5b94 3261
66268739
IV
3262#ifdef CONFIG_NET_POLL_CONTROLLER
3263static void be_netpoll(struct net_device *netdev)
3264{
3265 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3266 struct be_eq_obj *eqo;
66268739
IV
3267 int i;
3268
e49cc34f
SP
3269 for_all_evt_queues(adapter, eqo, i) {
3270 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3271 napi_schedule(&eqo->napi);
3272 }
10ef9ab4
SP
3273
3274 return;
66268739
IV
3275}
3276#endif
3277
84517482 3278#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3279static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3280
fa9a6fed 3281static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3282 const u8 *p, u32 img_start, int image_size,
3283 int hdr_size)
fa9a6fed
SB
3284{
3285 u32 crc_offset;
3286 u8 flashed_crc[4];
3287 int status;
3f0d4560
AK
3288
3289 crc_offset = hdr_size + img_start + image_size - 4;
3290
fa9a6fed 3291 p += crc_offset;
3f0d4560
AK
3292
3293 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3294 (image_size - 4));
fa9a6fed
SB
3295 if (status) {
3296 dev_err(&adapter->pdev->dev,
3297 "could not get crc from flash, not flashing redboot\n");
3298 return false;
3299 }
3300
 3301	/* Update redboot only if CRC does not match */
3302 if (!memcmp(flashed_crc, p, 4))
3303 return false;
3304 else
3305 return true;
fa9a6fed
SB
3306}
3307
306f1348
SP
3308static bool phy_flashing_required(struct be_adapter *adapter)
3309{
42f11cf2
AK
3310 return (adapter->phy.phy_type == TN_8022 &&
3311 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3312}
3313
c165541e
PR
3314static bool is_comp_in_ufi(struct be_adapter *adapter,
3315 struct flash_section_info *fsec, int type)
3316{
3317 int i = 0, img_type = 0;
3318 struct flash_section_info_g2 *fsec_g2 = NULL;
3319
ca34fe38 3320 if (BE2_chip(adapter))
c165541e
PR
3321 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3322
3323 for (i = 0; i < MAX_FLASH_COMP; i++) {
3324 if (fsec_g2)
3325 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3326 else
3327 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3328
3329 if (img_type == type)
3330 return true;
3331 }
3332 return false;
3333
3334}
3335
4188e7df 3336static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3337 int header_size,
3338 const struct firmware *fw)
3339{
3340 struct flash_section_info *fsec = NULL;
3341 const u8 *p = fw->data;
3342
3343 p += header_size;
3344 while (p < (fw->data + fw->size)) {
3345 fsec = (struct flash_section_info *)p;
3346 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3347 return fsec;
3348 p += 32;
3349 }
3350 return NULL;
3351}
3352
773a2d7c
PR
3353static int be_flash(struct be_adapter *adapter, const u8 *img,
3354 struct be_dma_mem *flash_cmd, int optype, int img_size)
3355{
3356 u32 total_bytes = 0, flash_op, num_bytes = 0;
3357 int status = 0;
3358 struct be_cmd_write_flashrom *req = flash_cmd->va;
3359
3360 total_bytes = img_size;
3361 while (total_bytes) {
3362 num_bytes = min_t(u32, 32*1024, total_bytes);
3363
3364 total_bytes -= num_bytes;
3365
3366 if (!total_bytes) {
3367 if (optype == OPTYPE_PHY_FW)
3368 flash_op = FLASHROM_OPER_PHY_FLASH;
3369 else
3370 flash_op = FLASHROM_OPER_FLASH;
3371 } else {
3372 if (optype == OPTYPE_PHY_FW)
3373 flash_op = FLASHROM_OPER_PHY_SAVE;
3374 else
3375 flash_op = FLASHROM_OPER_SAVE;
3376 }
3377
be716446 3378 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3379 img += num_bytes;
3380 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3381 flash_op, num_bytes);
3382 if (status) {
3383 if (status == ILLEGAL_IOCTL_REQ &&
3384 optype == OPTYPE_PHY_FW)
3385 break;
3386 dev_err(&adapter->pdev->dev,
3387 "cmd to write to flash rom failed.\n");
3388 return status;
3389 }
3390 }
3391 return 0;
3392}
3393
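be_flash() streams an image to the controller 32 KB at a time: every intermediate chunk is sent with a SAVE opcode and only the final chunk carries the FLASH (commit) opcode, which is why the opcode choice keys off whether total_bytes has reached zero. The chunking pattern in isolation, with send_chunk() as a hypothetical stand-in for be_cmd_write_flashrom():

#include <stddef.h>
#include <stdio.h>

#define CHUNK		(32 * 1024)
#define OP_SAVE		1	/* intermediate chunk: buffer it */
#define OP_FLASH	2	/* final chunk: commit to flash */

/* Hypothetical transport; the driver issues be_cmd_write_flashrom() here. */
static int send_chunk(const unsigned char *buf, size_t len, int op)
{
	(void)buf;	/* a real transport would DMA this buffer */
	printf("op=%d len=%zu\n", op, len);
	return 0;
}

static int flash_image(const unsigned char *img, size_t total)
{
	while (total) {
		size_t n = total < CHUNK ? total : CHUNK;
		/* Last chunk iff nothing remains after this one. */
		int op = (total == n) ? OP_FLASH : OP_SAVE;
		int err = send_chunk(img, n, op);

		if (err)
			return err;
		img += n;
		total -= n;
	}
	return 0;
}

int main(void)
{
	static unsigned char img[100000];	/* three full chunks + remainder */

	return flash_image(img, sizeof(img));
}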
0ad3157e 3394/* For BE2, BE3 and BE3-R */
ca34fe38 3395static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3396 const struct firmware *fw,
3397 struct be_dma_mem *flash_cmd,
3398 int num_of_images)
3f0d4560 3399
84517482 3400{
3f0d4560 3401 int status = 0, i, filehdr_size = 0;
c165541e 3402 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3403 const u8 *p = fw->data;
215faf9c 3404 const struct flash_comp *pflashcomp;
773a2d7c 3405 int num_comp, redboot;
c165541e
PR
3406 struct flash_section_info *fsec = NULL;
3407
3408 struct flash_comp gen3_flash_types[] = {
3409 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3410 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3411 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3412 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3413 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3414 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3415 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3416 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3417 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3418 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3419 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3420 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3421 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3422 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3423 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3424 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3425 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3426 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3427 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3428 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3429 };
c165541e
PR
3430
3431 struct flash_comp gen2_flash_types[] = {
3432 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3433 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3434 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3435 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3436 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3437 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3438 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3439 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3440 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3441 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3442 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3443 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3444 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3445 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3446 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3447 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3448 };
3449
ca34fe38 3450 if (BE3_chip(adapter)) {
3f0d4560
AK
3451 pflashcomp = gen3_flash_types;
3452 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3453 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3454 } else {
3455 pflashcomp = gen2_flash_types;
3456 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3457 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3458 }
ca34fe38 3459
c165541e
PR
3460 /* Get flash section info*/
3461 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3462 if (!fsec) {
3463 dev_err(&adapter->pdev->dev,
3464 "Invalid Cookie. UFI corrupted ?\n");
3465 return -1;
3466 }
9fe96934 3467 for (i = 0; i < num_comp; i++) {
c165541e 3468 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3469 continue;
c165541e
PR
3470
3471 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3472 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3473 continue;
3474
773a2d7c
PR
3475 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3476 !phy_flashing_required(adapter))
306f1348 3477 continue;
c165541e 3478
773a2d7c
PR
3479 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3480 redboot = be_flash_redboot(adapter, fw->data,
3481 pflashcomp[i].offset, pflashcomp[i].size,
3482 filehdr_size + img_hdrs_size);
3483 if (!redboot)
3484 continue;
3485 }
c165541e 3486
3f0d4560 3487 p = fw->data;
c165541e 3488 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3489 if (p + pflashcomp[i].size > fw->data + fw->size)
3490 return -1;
773a2d7c
PR
3491
3492 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3493 pflashcomp[i].size);
3494 if (status) {
3495 dev_err(&adapter->pdev->dev,
3496 "Flashing section type %d failed.\n",
3497 pflashcomp[i].img_type);
3498 return status;
84517482 3499 }
84517482 3500 }
84517482
AK
3501 return 0;
3502}
3503
773a2d7c
PR
3504static int be_flash_skyhawk(struct be_adapter *adapter,
3505 const struct firmware *fw,
3506 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3507{
773a2d7c
PR
3508 int status = 0, i, filehdr_size = 0;
3509 int img_offset, img_size, img_optype, redboot;
3510 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3511 const u8 *p = fw->data;
3512 struct flash_section_info *fsec = NULL;
3513
3514 filehdr_size = sizeof(struct flash_file_hdr_g3);
3515 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3516 if (!fsec) {
3517 dev_err(&adapter->pdev->dev,
3518 "Invalid Cookie. UFI corrupted ?\n");
3519 return -1;
3520 }
3521
3522 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3523 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3524 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3525
3526 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3527 case IMAGE_FIRMWARE_iSCSI:
3528 img_optype = OPTYPE_ISCSI_ACTIVE;
3529 break;
3530 case IMAGE_BOOT_CODE:
3531 img_optype = OPTYPE_REDBOOT;
3532 break;
3533 case IMAGE_OPTION_ROM_ISCSI:
3534 img_optype = OPTYPE_BIOS;
3535 break;
3536 case IMAGE_OPTION_ROM_PXE:
3537 img_optype = OPTYPE_PXE_BIOS;
3538 break;
3539 case IMAGE_OPTION_ROM_FCoE:
3540 img_optype = OPTYPE_FCOE_BIOS;
3541 break;
3542 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3543 img_optype = OPTYPE_ISCSI_BACKUP;
3544 break;
3545 case IMAGE_NCSI:
3546 img_optype = OPTYPE_NCSI_FW;
3547 break;
3548 default:
3549 continue;
3550 }
3551
3552 if (img_optype == OPTYPE_REDBOOT) {
3553 redboot = be_flash_redboot(adapter, fw->data,
3554 img_offset, img_size,
3555 filehdr_size + img_hdrs_size);
3556 if (!redboot)
3557 continue;
3558 }
3559
3560 p = fw->data;
3561 p += filehdr_size + img_offset + img_hdrs_size;
3562 if (p + img_size > fw->data + fw->size)
3563 return -1;
3564
3565 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3566 if (status) {
3567 dev_err(&adapter->pdev->dev,
3568 "Flashing section type %d failed.\n",
3569 le32_to_cpu(fsec->fsec_entry[i].type));
3570 return status;
3571 }
3572 }
3573 return 0;
3f0d4560
AK
3574}
3575
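The switch above is a pure section-type to flash-opcode map. As a design note, the same mapping can be table-driven, which keeps each new section type to a single line; a sketch with illustrative constants standing in for the driver's IMAGE_ and OPTYPE_ values:

#include <stddef.h>

/* Illustrative values only; the real IMAGE_ and OPTYPE_ constants live
 * in the driver's headers. */
struct type_map {
	int img_type;
	int optype;
};

static const struct type_map skyhawk_map[] = {
	{ 1, 10 },	/* e.g. IMAGE_FIRMWARE_iSCSI -> OPTYPE_ISCSI_ACTIVE */
	{ 2, 11 },	/* e.g. IMAGE_BOOT_CODE      -> OPTYPE_REDBOOT */
	{ 3, 12 },	/* e.g. IMAGE_OPTION_ROM_PXE -> OPTYPE_PXE_BIOS */
};

/* Return the flash opcode for a section type, or -1 to skip it, which
 * mirrors the switch's 'default: continue'. */
static int img_optype_for(int img_type)
{
	size_t i;

	for (i = 0; i < sizeof(skyhawk_map) / sizeof(skyhawk_map[0]); i++)
		if (skyhawk_map[i].img_type == img_type)
			return skyhawk_map[i].optype;
	return -1;
}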
485bf569
SN
3576static int lancer_fw_download(struct be_adapter *adapter,
3577 const struct firmware *fw)
84517482 3578{
485bf569
SN
3579#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3580#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3581 struct be_dma_mem flash_cmd;
485bf569
SN
3582 const u8 *data_ptr = NULL;
3583 u8 *dest_image_ptr = NULL;
3584 size_t image_size = 0;
3585 u32 chunk_size = 0;
3586 u32 data_written = 0;
3587 u32 offset = 0;
3588 int status = 0;
3589 u8 add_status = 0;
f67ef7ba 3590 u8 change_status;
84517482 3591
485bf569 3592 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3593 dev_err(&adapter->pdev->dev,
485bf569
SN
3594 "FW Image not properly aligned. "
3595 "Length must be 4 byte aligned.\n");
3596 status = -EINVAL;
3597 goto lancer_fw_exit;
d9efd2af
SB
3598 }
3599
485bf569
SN
3600 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3601 + LANCER_FW_DOWNLOAD_CHUNK;
3602 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3603 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3604 if (!flash_cmd.va) {
3605 status = -ENOMEM;
485bf569
SN
3606 goto lancer_fw_exit;
3607 }
84517482 3608
485bf569
SN
3609 dest_image_ptr = flash_cmd.va +
3610 sizeof(struct lancer_cmd_req_write_object);
3611 image_size = fw->size;
3612 data_ptr = fw->data;
3613
3614 while (image_size) {
3615 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3616
3617 /* Copy the image chunk content. */
3618 memcpy(dest_image_ptr, data_ptr, chunk_size);
3619
3620 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3621 chunk_size, offset,
3622 LANCER_FW_DOWNLOAD_LOCATION,
3623 &data_written, &change_status,
3624 &add_status);
485bf569
SN
3625 if (status)
3626 break;
3627
3628 offset += data_written;
3629 data_ptr += data_written;
3630 image_size -= data_written;
3631 }
3632
3633 if (!status) {
3634 /* Commit the FW written */
3635 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3636 0, offset,
3637 LANCER_FW_DOWNLOAD_LOCATION,
3638 &data_written, &change_status,
3639 &add_status);
485bf569
SN
3640 }
3641
3642 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3643 flash_cmd.dma);
3644 if (status) {
3645 dev_err(&adapter->pdev->dev,
3646 "Firmware load error. "
3647 "Status code: 0x%x Additional Status: 0x%x\n",
3648 status, add_status);
3649 goto lancer_fw_exit;
3650 }
3651
f67ef7ba 3652 if (change_status == LANCER_FW_RESET_NEEDED) {
5c510811
SK
3653 status = lancer_physdev_ctrl(adapter,
3654 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3655 if (status) {
3656 dev_err(&adapter->pdev->dev,
3657 "Adapter busy for FW reset.\n"
3658 "New FW will not be active.\n");
3659 goto lancer_fw_exit;
3660 }
3661 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3662 dev_err(&adapter->pdev->dev,
3663 "System reboot required for new FW"
3664 " to be active\n");
3665 }
3666
485bf569
SN
3667 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3668lancer_fw_exit:
3669 return status;
3670}
3671
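The IS_ALIGNED(fw->size, sizeof(u32)) precondition at the top of lancer_fw_download() is a power-of-two mask test: a length is 4-byte aligned exactly when its two low bits are clear. A one-liner equivalent for reference:

#include <stdbool.h>
#include <stddef.h>

/* Equivalent of the kernel's IS_ALIGNED(x, a) for power-of-two 'a'. */
static bool is_aligned(size_t x, size_t a)
{
	return (x & (a - 1)) == 0;
}

/* The driver's check: the image must be a whole number of 32-bit words. */
static bool lancer_fw_len_ok(size_t fw_size)
{
	return is_aligned(fw_size, 4);
}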
ca34fe38
SP
3672#define UFI_TYPE2 2
3673#define UFI_TYPE3 3
0ad3157e 3674#define UFI_TYPE3R 10
ca34fe38
SP
3675#define UFI_TYPE4 4
3676static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3677 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3678{
3679 if (fhdr == NULL)
3680 goto be_get_ufi_exit;
3681
ca34fe38
SP
3682 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3683 return UFI_TYPE4;
0ad3157e
VV
3684 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3685 if (fhdr->asic_type_rev == 0x10)
3686 return UFI_TYPE3R;
3687 else
3688 return UFI_TYPE3;
3689 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3690 return UFI_TYPE2;
773a2d7c
PR
3691
3692be_get_ufi_exit:
3693 dev_err(&adapter->pdev->dev,
3694 "UFI and Interface are not compatible for flashing\n");
3695 return -1;
3696}
3697
485bf569
SN
3698static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3699{
485bf569
SN
3700 struct flash_file_hdr_g3 *fhdr3;
3701 struct image_hdr *img_hdr_ptr = NULL;
3702 struct be_dma_mem flash_cmd;
3703 const u8 *p;
773a2d7c 3704 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3705
be716446 3706 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3707 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3708 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3709 if (!flash_cmd.va) {
3710 status = -ENOMEM;
485bf569 3711 goto be_fw_exit;
84517482
AK
3712 }
3713
773a2d7c 3714 p = fw->data;
0ad3157e 3715 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3716
0ad3157e 3717 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3718
773a2d7c
PR
3719 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3720 for (i = 0; i < num_imgs; i++) {
3721 img_hdr_ptr = (struct image_hdr *)(fw->data +
3722 (sizeof(struct flash_file_hdr_g3) +
3723 i * sizeof(struct image_hdr)));
3724 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3725 switch (ufi_type) {
3726 case UFI_TYPE4:
773a2d7c
PR
3727 status = be_flash_skyhawk(adapter, fw,
3728 &flash_cmd, num_imgs);
0ad3157e
VV
3729 break;
3730 case UFI_TYPE3R:
ca34fe38
SP
3731 status = be_flash_BEx(adapter, fw, &flash_cmd,
3732 num_imgs);
0ad3157e
VV
3733 break;
3734 case UFI_TYPE3:
3735 /* Do not flash this ufi on BE3-R cards */
3736 if (adapter->asic_rev < 0x10)
3737 status = be_flash_BEx(adapter, fw,
3738 &flash_cmd,
3739 num_imgs);
3740 else {
3741 status = -1;
3742 dev_err(&adapter->pdev->dev,
3743 "Can't load BE3 UFI on BE3R\n");
3744 }
3745 }
3f0d4560 3746 }
773a2d7c
PR
3747 }
3748
ca34fe38
SP
3749 if (ufi_type == UFI_TYPE2)
3750 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3751 else if (ufi_type == -1)
3f0d4560 3752 status = -1;
84517482 3753
2b7bcebf
IV
3754 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3755 flash_cmd.dma);
84517482
AK
3756 if (status) {
3757 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3758 goto be_fw_exit;
84517482
AK
3759 }
3760
af901ca1 3761 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3762
485bf569
SN
3763be_fw_exit:
3764 return status;
3765}
3766
3767int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3768{
3769 const struct firmware *fw;
3770 int status;
3771
3772 if (!netif_running(adapter->netdev)) {
3773 dev_err(&adapter->pdev->dev,
3774 "Firmware load not allowed (interface is down)\n");
3775 return -1;
3776 }
3777
3778 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3779 if (status)
3780 goto fw_exit;
3781
3782 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3783
3784 if (lancer_chip(adapter))
3785 status = lancer_fw_download(adapter, fw);
3786 else
3787 status = be_fw_download(adapter, fw);
3788
eeb65ced
SK
3789 if (!status)
3790 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3791 adapter->fw_on_flash);
3792
84517482
AK
3793fw_exit:
3794 release_firmware(fw);
3795 return status;
3796}
3797
e5686ad8 3798static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3799 .ndo_open = be_open,
3800 .ndo_stop = be_close,
3801 .ndo_start_xmit = be_xmit,
a54769f5 3802 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3803 .ndo_set_mac_address = be_mac_addr_set,
3804 .ndo_change_mtu = be_change_mtu,
ab1594e9 3805 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3806 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3807 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3808 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3809 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3810 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3811 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3812 .ndo_get_vf_config = be_get_vf_config,
3813#ifdef CONFIG_NET_POLL_CONTROLLER
3814 .ndo_poll_controller = be_netpoll,
3815#endif
6b7c5b94
SP
3816};
3817
3818static void be_netdev_init(struct net_device *netdev)
3819{
3820 struct be_adapter *adapter = netdev_priv(netdev);
3821
6332c8d3 3822 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 3823 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 3824 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
3825 if (be_multi_rxq(adapter))
3826 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3827
3828 netdev->features |= netdev->hw_features |
f646968f 3829 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 3830
eb8a50d9 3831 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3832 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3833
fbc13f01
AK
3834 netdev->priv_flags |= IFF_UNICAST_FLT;
3835
6b7c5b94
SP
3836 netdev->flags |= IFF_MULTICAST;
3837
b7e5887e 3838 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3839
10ef9ab4 3840 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3841
3842 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
3843}
3844
3845static void be_unmap_pci_bars(struct be_adapter *adapter)
3846{
c5b3ad4c
SP
3847 if (adapter->csr)
3848 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3849 if (adapter->db)
ce66f781 3850 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3851}
3852
ce66f781
SP
3853static int db_bar(struct be_adapter *adapter)
3854{
3855 if (lancer_chip(adapter) || !be_physfn(adapter))
3856 return 0;
3857 else
3858 return 4;
3859}
3860
3861static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3862{
dbf0f2a7 3863 if (skyhawk_chip(adapter)) {
ce66f781
SP
3864 adapter->roce_db.size = 4096;
3865 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3866 db_bar(adapter));
3867 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3868 db_bar(adapter));
3869 }
045508a8 3870 return 0;
6b7c5b94
SP
3871}
3872
3873static int be_map_pci_bars(struct be_adapter *adapter)
3874{
3875 u8 __iomem *addr;
ce66f781 3876 u32 sli_intf;
6b7c5b94 3877
ce66f781
SP
3878 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3879 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3880 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3881
c5b3ad4c
SP
3882 if (BEx_chip(adapter) && be_physfn(adapter)) {
3883 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3884 if (adapter->csr == NULL)
3885 return -ENOMEM;
3886 }
3887
ce66f781 3888 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3889 if (addr == NULL)
3890 goto pci_map_err;
ba343c77 3891 adapter->db = addr;
ce66f781
SP
3892
3893 be_roce_map_pci_bars(adapter);
6b7c5b94 3894 return 0;
ce66f781 3895
6b7c5b94
SP
3896pci_map_err:
3897 be_unmap_pci_bars(adapter);
3898 return -ENOMEM;
3899}
3900
6b7c5b94
SP
3901static void be_ctrl_cleanup(struct be_adapter *adapter)
3902{
8788fdc2 3903 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3904
3905 be_unmap_pci_bars(adapter);
3906
3907 if (mem->va)
2b7bcebf
IV
3908 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3909 mem->dma);
e7b909a6 3910
5b8821b7 3911 mem = &adapter->rx_filter;
e7b909a6 3912 if (mem->va)
2b7bcebf
IV
3913 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3914 mem->dma);
6b7c5b94
SP
3915}
3916
6b7c5b94
SP
3917static int be_ctrl_init(struct be_adapter *adapter)
3918{
8788fdc2
SP
3919 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3920 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3921 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 3922 u32 sli_intf;
6b7c5b94 3923 int status;
6b7c5b94 3924
ce66f781
SP
3925 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3926 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3927 SLI_INTF_FAMILY_SHIFT;
3928 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3929
6b7c5b94
SP
3930 status = be_map_pci_bars(adapter);
3931 if (status)
e7b909a6 3932 goto done;
6b7c5b94
SP
3933
3934 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3935 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3936 mbox_mem_alloc->size,
3937 &mbox_mem_alloc->dma,
3938 GFP_KERNEL);
6b7c5b94 3939 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3940 status = -ENOMEM;
3941 goto unmap_pci_bars;
6b7c5b94
SP
3942 }
3943 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3944 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3945 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3946 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3947
5b8821b7 3948 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
3949 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
3950 rx_filter->size, &rx_filter->dma,
3951 GFP_KERNEL);
5b8821b7 3952 if (rx_filter->va == NULL) {
e7b909a6
SP
3953 status = -ENOMEM;
3954 goto free_mbox;
3955 }
1f9061d2 3956
2984961c 3957 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3958 spin_lock_init(&adapter->mcc_lock);
3959 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3960
dd131e76 3961 init_completion(&adapter->flash_compl);
cf588477 3962 pci_save_state(adapter->pdev);
6b7c5b94 3963 return 0;
e7b909a6
SP
3964
3965free_mbox:
2b7bcebf
IV
3966 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3967 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3968
3969unmap_pci_bars:
3970 be_unmap_pci_bars(adapter);
3971
3972done:
3973 return status;
6b7c5b94
SP
3974}
3975
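The mailbox setup above over-allocates by 16 bytes and then rounds both the CPU and DMA addresses up with PTR_ALIGN, so the 16-byte-aligned mailbox always fits inside the allocation. The same over-allocate-and-round-up pattern in plain user-space C, with malloc standing in for the coherent DMA allocation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round 'p' up to the next multiple of 'align' (a power of two), like
 * the kernel's PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t size = 256;		/* stand-in for sizeof(struct be_mcc_mailbox) */
	void *raw = malloc(size + 16);	/* slack so rounding up stays in bounds */
	void *mbox;

	if (!raw)
		return 1;
	mbox = ptr_align(raw, 16);	/* aligned start, still inside the buffer */
	printf("raw=%p mbox=%p\n", raw, mbox);
	free(raw);	/* free the original pointer, not the aligned one */
	return 0;
}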
3976static void be_stats_cleanup(struct be_adapter *adapter)
3977{
3abcdeda 3978 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3979
3980 if (cmd->va)
2b7bcebf
IV
3981 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3982 cmd->va, cmd->dma);
6b7c5b94
SP
3983}
3984
3985static int be_stats_init(struct be_adapter *adapter)
3986{
3abcdeda 3987 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3988
ca34fe38
SP
3989 if (lancer_chip(adapter))
3990 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3991 else if (BE2_chip(adapter))
89a88ab8 3992 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3993 else
3994 /* BE3 and Skyhawk */
3995 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3996
ede23fa8
JP
3997 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3998 GFP_KERNEL);
6b7c5b94
SP
3999 if (cmd->va == NULL)
4000 return -1;
4001 return 0;
4002}
4003
3bc6b06c 4004static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4005{
4006 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4007
6b7c5b94
SP
4008 if (!adapter)
4009 return;
4010
045508a8 4011 be_roce_dev_remove(adapter);
8cef7a78 4012 be_intr_set(adapter, false);
045508a8 4013
f67ef7ba
PR
4014 cancel_delayed_work_sync(&adapter->func_recovery_work);
4015
6b7c5b94
SP
4016 unregister_netdev(adapter->netdev);
4017
5fb379ee
SP
4018 be_clear(adapter);
4019
bf99e50d
PR
4020 /* tell fw we're done with firing cmds */
4021 be_cmd_fw_clean(adapter);
4022
6b7c5b94
SP
4023 be_stats_cleanup(adapter);
4024
4025 be_ctrl_cleanup(adapter);
4026
d6b6d987
SP
4027 pci_disable_pcie_error_reporting(pdev);
4028
6b7c5b94
SP
4029 pci_set_drvdata(pdev, NULL);
4030 pci_release_regions(pdev);
4031 pci_disable_device(pdev);
4032
4033 free_netdev(adapter->netdev);
4034}
4035
4762f6ce
AK
4036bool be_is_wol_supported(struct be_adapter *adapter)
4037{
4038 return (adapter->wol_cap & BE_WOL_CAP) &&
4039 !be_is_wol_excluded(adapter);
4040}
4041
941a77d5
SK
4042u32 be_get_fw_log_level(struct be_adapter *adapter)
4043{
4044 struct be_dma_mem extfat_cmd;
4045 struct be_fat_conf_params *cfgs;
4046 int status;
4047 u32 level = 0;
4048 int j;
4049
f25b119c
PR
4050 if (lancer_chip(adapter))
4051 return 0;
4052
941a77d5
SK
4053 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4054 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4055 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4056 &extfat_cmd.dma);
4057
4058 if (!extfat_cmd.va) {
4059 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4060 __func__);
4061 goto err;
4062 }
4063
4064 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4065 if (!status) {
4066 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4067 sizeof(struct be_cmd_resp_hdr));
ac46a462 4068 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
4069 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4070 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4071 }
4072 }
4073 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4074 extfat_cmd.dma);
4075err:
4076 return level;
4077}
abb93951 4078
39f1d94d 4079static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4080{
6b7c5b94 4081 int status;
941a77d5 4082 u32 level;
6b7c5b94 4083
9e1453c5
AK
4084 status = be_cmd_get_cntl_attributes(adapter);
4085 if (status)
4086 return status;
4087
4762f6ce
AK
4088 status = be_cmd_get_acpi_wol_cap(adapter);
4089 if (status) {
4090 /* In case of a failure to get WOL capabilities,
4091 * check the exclusion list to determine WOL capability */
4092 if (!be_is_wol_excluded(adapter))
4093 adapter->wol_cap |= BE_WOL_CAP;
4094 }
4095
4096 if (be_is_wol_supported(adapter))
4097 adapter->wol = true;
4098
7aeb2156
PR
4099 /* Must be a power of 2 or else MODULO will BUG_ON */
4100 adapter->be_get_temp_freq = 64;
4101
941a77d5
SK
4102 level = be_get_fw_log_level(adapter);
4103 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4104
92bf14ab 4105 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4106 return 0;
6b7c5b94
SP
4107}
4108
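The power-of-two note above matters because the driver's MODULO() helper takes the remainder with a bitmask (and BUG_ONs otherwise), so be_get_temp_freq must stay a power of two. A small demonstration of why the mask form breaks for other divisors, assuming MODULO(x, n) reduces to x & (n - 1):

#include <assert.h>

/* Mask-based remainder; correct only when n is a power of two. */
#define MODULO(x, n)	((x) & ((n) - 1))

int main(void)
{
	/* Power of two: mask and true remainder agree (both are 2). */
	assert(MODULO(130u, 64u) == 130u % 64u);

	/* Not a power of two: 130 & 59 == 2, but 130 % 60 == 10. */
	assert(MODULO(130u, 60u) != 130u % 60u);
	return 0;
}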
f67ef7ba 4109static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4110{
01e5b2c4 4111 struct device *dev = &adapter->pdev->dev;
d8110f62 4112 int status;
d8110f62 4113
f67ef7ba
PR
4114 status = lancer_test_and_set_rdy_state(adapter);
4115 if (status)
4116 goto err;
d8110f62 4117
f67ef7ba
PR
4118 if (netif_running(adapter->netdev))
4119 be_close(adapter->netdev);
d8110f62 4120
f67ef7ba
PR
4121 be_clear(adapter);
4122
01e5b2c4 4123 be_clear_all_error(adapter);
f67ef7ba
PR
4124
4125 status = be_setup(adapter);
4126 if (status)
4127 goto err;
d8110f62 4128
f67ef7ba
PR
4129 if (netif_running(adapter->netdev)) {
4130 status = be_open(adapter->netdev);
d8110f62
PR
4131 if (status)
4132 goto err;
f67ef7ba 4133 }
d8110f62 4134
01e5b2c4 4135 dev_err(dev, "Error recovery successful\n");
f67ef7ba
PR
4136 return 0;
4137err:
01e5b2c4
SK
4138 if (status == -EAGAIN)
4139 dev_err(dev, "Waiting for resource provisioning\n");
4140 else
4141 dev_err(dev, "Error recovery failed\n");
d8110f62 4142
f67ef7ba
PR
4143 return status;
4144}
4145
4146static void be_func_recovery_task(struct work_struct *work)
4147{
4148 struct be_adapter *adapter =
4149 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4150 int status = 0;
d8110f62 4151
f67ef7ba 4152 be_detect_error(adapter);
d8110f62 4153
f67ef7ba 4154 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4155
f67ef7ba
PR
4156 rtnl_lock();
4157 netif_device_detach(adapter->netdev);
4158 rtnl_unlock();
d8110f62 4159
f67ef7ba 4160 status = lancer_recover_func(adapter);
f67ef7ba
PR
4161 if (!status)
4162 netif_device_attach(adapter->netdev);
d8110f62 4163 }
f67ef7ba 4164
01e5b2c4
SK
4165 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4166 * no need to attempt further recovery.
4167 */
4168 if (!status || status == -EAGAIN)
4169 schedule_delayed_work(&adapter->func_recovery_work,
4170 msecs_to_jiffies(1000));
d8110f62
PR
4171}
4172
4173static void be_worker(struct work_struct *work)
4174{
4175 struct be_adapter *adapter =
4176 container_of(work, struct be_adapter, work.work);
4177 struct be_rx_obj *rxo;
10ef9ab4 4178 struct be_eq_obj *eqo;
d8110f62
PR
4179 int i;
4180
d8110f62
PR
4181 /* When interrupts are not yet enabled, just reap any pending
4182 * MCC completions */
4183 if (!netif_running(adapter->netdev)) {
072a9c48 4184 local_bh_disable();
10ef9ab4 4185 be_process_mcc(adapter);
072a9c48 4186 local_bh_enable();
d8110f62
PR
4187 goto reschedule;
4188 }
4189
4190 if (!adapter->stats_cmd_sent) {
4191 if (lancer_chip(adapter))
4192 lancer_cmd_get_pport_stats(adapter,
4193 &adapter->stats_cmd);
4194 else
4195 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4196 }
4197
d696b5e2
VV
4198 if (be_physfn(adapter) &&
4199 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4200 be_cmd_get_die_temperature(adapter);
4201
d8110f62 4202 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
4203 if (rxo->rx_post_starved) {
4204 rxo->rx_post_starved = false;
4205 be_post_rx_frags(rxo, GFP_KERNEL);
4206 }
4207 }
4208
10ef9ab4
SP
4209 for_all_evt_queues(adapter, eqo, i)
4210 be_eqd_update(adapter, eqo);
4211
d8110f62
PR
4212reschedule:
4213 adapter->work_counter++;
4214 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4215}
4216
257a3feb 4217/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4218static bool be_reset_required(struct be_adapter *adapter)
4219{
257a3feb 4220 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4221}
4222
d379142b
SP
4223static char *mc_name(struct be_adapter *adapter)
4224{
4225 if (adapter->function_mode & FLEX10_MODE)
4226 return "FLEX10";
4227 else if (adapter->function_mode & VNIC_MODE)
4228 return "vNIC";
4229 else if (adapter->function_mode & UMC_ENABLED)
4230 return "UMC";
4231 else
4232 return "";
4233}
4234
4235static inline char *func_name(struct be_adapter *adapter)
4236{
4237 return be_physfn(adapter) ? "PF" : "VF";
4238}
4239
1dd06ae8 4240static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4241{
4242 int status = 0;
4243 struct be_adapter *adapter;
4244 struct net_device *netdev;
b4e32a71 4245 char port_name;
6b7c5b94
SP
4246
4247 status = pci_enable_device(pdev);
4248 if (status)
4249 goto do_none;
4250
4251 status = pci_request_regions(pdev, DRV_NAME);
4252 if (status)
4253 goto disable_dev;
4254 pci_set_master(pdev);
4255
7f640062 4256 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4257 if (netdev == NULL) {
4258 status = -ENOMEM;
4259 goto rel_reg;
4260 }
4261 adapter = netdev_priv(netdev);
4262 adapter->pdev = pdev;
4263 pci_set_drvdata(pdev, adapter);
4264 adapter->netdev = netdev;
2243e2e9 4265 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4266
2b7bcebf 4267 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94 4268 if (!status) {
2bd92cd2
CH
4269 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4270 if (status < 0) {
4271 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4272 goto free_netdev;
4273 }
6b7c5b94
SP
4274 netdev->features |= NETIF_F_HIGHDMA;
4275 } else {
2b7bcebf 4276 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
0c5fed09
SK
4277 if (!status)
4278 status = dma_set_coherent_mask(&pdev->dev,
4279 DMA_BIT_MASK(32));
6b7c5b94
SP
4280 if (status) {
4281 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4282 goto free_netdev;
4283 }
4284 }
4285
d6b6d987
SP
4286 status = pci_enable_pcie_error_reporting(pdev);
4287 if (status)
4ce1fd61 4288 dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
d6b6d987 4289
6b7c5b94
SP
4290 status = be_ctrl_init(adapter);
4291 if (status)
39f1d94d 4292 goto free_netdev;
6b7c5b94 4293
2243e2e9 4294 /* sync up with fw's ready state */
ba343c77 4295 if (be_physfn(adapter)) {
bf99e50d 4296 status = be_fw_wait_ready(adapter);
ba343c77
SB
4297 if (status)
4298 goto ctrl_clean;
ba343c77 4299 }
6b7c5b94 4300
39f1d94d
SP
4301 if (be_reset_required(adapter)) {
4302 status = be_cmd_reset_function(adapter);
4303 if (status)
4304 goto ctrl_clean;
556ae191 4305
2d177be8
KA
4306 /* Wait for interrupts to quiesce after an FLR */
4307 msleep(100);
4308 }
8cef7a78
SK
4309
4310 /* Allow interrupts for other ULPs running on NIC function */
4311 be_intr_set(adapter, true);
10ef9ab4 4312
2d177be8
KA
4313 /* tell fw we're ready to fire cmds */
4314 status = be_cmd_fw_init(adapter);
4315 if (status)
4316 goto ctrl_clean;
4317
2243e2e9
SP
4318 status = be_stats_init(adapter);
4319 if (status)
4320 goto ctrl_clean;
4321
39f1d94d 4322 status = be_get_initial_config(adapter);
6b7c5b94
SP
4323 if (status)
4324 goto stats_clean;
6b7c5b94
SP
4325
4326 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4327 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4328 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4329
5fb379ee
SP
4330 status = be_setup(adapter);
4331 if (status)
55f5c3c5 4332 goto stats_clean;
2243e2e9 4333
3abcdeda 4334 be_netdev_init(netdev);
6b7c5b94
SP
4335 status = register_netdev(netdev);
4336 if (status != 0)
5fb379ee 4337 goto unsetup;
6b7c5b94 4338
045508a8
PP
4339 be_roce_dev_add(adapter);
4340
f67ef7ba
PR
4341 schedule_delayed_work(&adapter->func_recovery_work,
4342 msecs_to_jiffies(1000));
b4e32a71
PR
4343
4344 be_cmd_query_port_name(adapter, &port_name);
4345
d379142b
SP
4346 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4347 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4348
6b7c5b94
SP
4349 return 0;
4350
5fb379ee
SP
4351unsetup:
4352 be_clear(adapter);
6b7c5b94
SP
4353stats_clean:
4354 be_stats_cleanup(adapter);
4355ctrl_clean:
4356 be_ctrl_cleanup(adapter);
f9449ab7 4357free_netdev:
fe6d2a38 4358 free_netdev(netdev);
8d56ff11 4359 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4360rel_reg:
4361 pci_release_regions(pdev);
4362disable_dev:
4363 pci_disable_device(pdev);
4364do_none:
c4ca2374 4365 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4366 return status;
4367}
4368
4369static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4370{
4371 struct be_adapter *adapter = pci_get_drvdata(pdev);
4372 struct net_device *netdev = adapter->netdev;
4373
71d8d1b5
AK
4374 if (adapter->wol)
4375 be_setup_wol(adapter, true);
4376
f67ef7ba
PR
4377 cancel_delayed_work_sync(&adapter->func_recovery_work);
4378
6b7c5b94
SP
4379 netif_device_detach(netdev);
4380 if (netif_running(netdev)) {
4381 rtnl_lock();
4382 be_close(netdev);
4383 rtnl_unlock();
4384 }
9b0365f1 4385 be_clear(adapter);
6b7c5b94
SP
4386
4387 pci_save_state(pdev);
4388 pci_disable_device(pdev);
4389 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4390 return 0;
4391}
4392
4393static int be_resume(struct pci_dev *pdev)
4394{
4395 int status = 0;
4396 struct be_adapter *adapter = pci_get_drvdata(pdev);
4397 struct net_device *netdev = adapter->netdev;
4398
4399 netif_device_detach(netdev);
4400
4401 status = pci_enable_device(pdev);
4402 if (status)
4403 return status;
4404
1ca01512 4405 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4406 pci_restore_state(pdev);
4407
2243e2e9
SP
4408 /* tell fw we're ready to fire cmds */
4409 status = be_cmd_fw_init(adapter);
4410 if (status)
4411 return status;
4412
9b0365f1 4413 be_setup(adapter);
6b7c5b94
SP
4414 if (netif_running(netdev)) {
4415 rtnl_lock();
4416 be_open(netdev);
4417 rtnl_unlock();
4418 }
f67ef7ba
PR
4419
4420 schedule_delayed_work(&adapter->func_recovery_work,
4421 msecs_to_jiffies(1000));
6b7c5b94 4422 netif_device_attach(netdev);
71d8d1b5
AK
4423
4424 if (adapter->wol)
4425 be_setup_wol(adapter, false);
a4ca055f 4426
6b7c5b94
SP
4427 return 0;
4428}
4429
82456b03
SP
4430/*
4431 * An FLR will stop BE from DMAing any data.
4432 */
4433static void be_shutdown(struct pci_dev *pdev)
4434{
4435 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4436
2d5d4154
AK
4437 if (!adapter)
4438 return;
82456b03 4439
0f4a6828 4440 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4441 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4442
2d5d4154 4443 netif_device_detach(adapter->netdev);
82456b03 4444
57841869
AK
4445 be_cmd_reset_function(adapter);
4446
82456b03 4447 pci_disable_device(pdev);
82456b03
SP
4448}
4449
cf588477
SP
4450static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4451 pci_channel_state_t state)
4452{
4453 struct be_adapter *adapter = pci_get_drvdata(pdev);
4454 struct net_device *netdev = adapter->netdev;
4455
4456 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4457
01e5b2c4
SK
4458 if (!adapter->eeh_error) {
4459 adapter->eeh_error = true;
cf588477 4460
01e5b2c4 4461 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4462
cf588477 4463 rtnl_lock();
01e5b2c4
SK
4464 netif_device_detach(netdev);
4465 if (netif_running(netdev))
4466 be_close(netdev);
cf588477 4467 rtnl_unlock();
01e5b2c4
SK
4468
4469 be_clear(adapter);
cf588477 4470 }
cf588477
SP
4471
4472 if (state == pci_channel_io_perm_failure)
4473 return PCI_ERS_RESULT_DISCONNECT;
4474
4475 pci_disable_device(pdev);
4476
eeb7fc7b
SK
4477 /* The error could cause the FW to trigger a flash debug dump.
4478 * Resetting the card while flash dump is in progress
c8a54163
PR
4479 * can cause it not to recover; wait for it to finish.
4480 * Wait only for first function as it is needed only once per
4481 * adapter.
eeb7fc7b 4482 */
c8a54163
PR
4483 if (pdev->devfn == 0)
4484 ssleep(30);
4485
cf588477
SP
4486 return PCI_ERS_RESULT_NEED_RESET;
4487}
4488
4489static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4490{
4491 struct be_adapter *adapter = pci_get_drvdata(pdev);
4492 int status;
4493
4494 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4495
4496 status = pci_enable_device(pdev);
4497 if (status)
4498 return PCI_ERS_RESULT_DISCONNECT;
4499
4500 pci_set_master(pdev);
1ca01512 4501 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4502 pci_restore_state(pdev);
4503
4504 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4505 dev_info(&adapter->pdev->dev,
4506 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4507 status = be_fw_wait_ready(adapter);
cf588477
SP
4508 if (status)
4509 return PCI_ERS_RESULT_DISCONNECT;
4510
d6b6d987 4511 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4512 be_clear_all_error(adapter);
cf588477
SP
4513 return PCI_ERS_RESULT_RECOVERED;
4514}
4515
4516static void be_eeh_resume(struct pci_dev *pdev)
4517{
4518 int status = 0;
4519 struct be_adapter *adapter = pci_get_drvdata(pdev);
4520 struct net_device *netdev = adapter->netdev;
4521
4522 dev_info(&adapter->pdev->dev, "EEH resume\n");
4523
4524 pci_save_state(pdev);
4525
2d177be8 4526 status = be_cmd_reset_function(adapter);
cf588477
SP
4527 if (status)
4528 goto err;
4529
2d177be8
KA
4530 /* tell fw we're ready to fire cmds */
4531 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4532 if (status)
4533 goto err;
4534
cf588477
SP
4535 status = be_setup(adapter);
4536 if (status)
4537 goto err;
4538
4539 if (netif_running(netdev)) {
4540 status = be_open(netdev);
4541 if (status)
4542 goto err;
4543 }
f67ef7ba
PR
4544
4545 schedule_delayed_work(&adapter->func_recovery_work,
4546 msecs_to_jiffies(1000));
cf588477
SP
4547 netif_device_attach(netdev);
4548 return;
4549err:
4550 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4551}
4552
3646f0e5 4553static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4554 .error_detected = be_eeh_err_detected,
4555 .slot_reset = be_eeh_reset,
4556 .resume = be_eeh_resume,
4557};
4558
6b7c5b94
SP
4559static struct pci_driver be_driver = {
4560 .name = DRV_NAME,
4561 .id_table = be_dev_ids,
4562 .probe = be_probe,
4563 .remove = be_remove,
4564 .suspend = be_suspend,
cf588477 4565 .resume = be_resume,
82456b03 4566 .shutdown = be_shutdown,
cf588477 4567 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4568};
4569
4570static int __init be_init_module(void)
4571{
8e95a202
JP
4572 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4573 rx_frag_size != 2048) {
6b7c5b94
SP
4574 printk(KERN_WARNING DRV_NAME
4575 " : Module param rx_frag_size must be 2048/4096/8192."
4576 " Using 2048\n");
4577 rx_frag_size = 2048;
4578 }
6b7c5b94
SP
4579
4580 return pci_register_driver(&be_driver);
4581}
4582module_init(be_init_module);
4583
4584static void __exit be_exit_module(void)
4585{
4586 pci_unregister_driver(&be_driver);
4587}
4588module_exit(be_exit_module);