/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
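
/* Host interrupt enable/disable (added note): be_intr_set() below first
 * tries the be_cmd_intr_set() FW command path and, only if that fails,
 * falls back to toggling the HOSTINTR bit in the PCI config-space MEMBAR
 * control register via be_reg_intr_set().
 */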
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
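
/* Queue doorbells (added note): each 32-bit doorbell write packs the ring
 * id into the low bits and an operation-specific field (entries posted or
 * popped, re-arm/clear-interrupt flags) into the upper bits.  The wmb()
 * before iowrite32() orders the descriptor writes in memory ahead of the
 * doorbell that makes them visible to the adapter.
 */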
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For a BE VF, the MAC address is already activated by the PF.
	 * Hence the only operation left is updating netdev->dev_addr.
	 * Update it only if the user is passing the same MAC that was
	 * used while configuring the VF MAC from the PF (hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
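
/* Stats parsing (added note): BE2 returns v0-format stats, BE3 and Skyhawk
 * return v1, and Lancer exposes per-physical-port (pport) stats instead.
 * The helpers below pick the right layout out of the shared stats_cmd DMA
 * buffer.
 */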
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
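
/* Added note on the accumulator below: the low 16 bits of *acc mirror the
 * last raw HW reading and the high 16 bits count wraps.  A new reading
 * smaller than the stored low half-word means the 16-bit HW counter wrapped
 * past 65535, so one extra 65536 is folded into the 32-bit SW value.
 */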
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments[rxo->q.id]);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
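
/* WRB accounting (added note): one WRB per fragment (a non-empty linear
 * head counts as one) plus one for the header WRB.  On non-Lancer chips
 * the total must be even, so an extra dummy WRB is flagged when the count
 * comes out odd.
 */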
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
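
/* Added note: be_get_tx_vlan_tag() remaps the 802.1p priority.  If the
 * priority supplied by the stack is not set in the adapter's
 * vlan_prio_bmap, the tag's PCP bits are rewritten to the FW-recommended
 * priority while the VID bits are kept intact.
 */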
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
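
/* Added note: make_tx_wrbs() lays out one header WRB followed by a data
 * WRB per mapped fragment (plus an optional trailing dummy WRB).  If any
 * DMA mapping fails midway, the dma_err path rewinds txq->head and unmaps
 * every fragment mapped so far before returning 0.
 */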
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}
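
/* Added note: be_xmit() applies two HW workarounds before building WRBs:
 * short (<= 60 byte) tagged IPv4 frames are trimmed to the IP header's
 * tot_len so HW padding cannot corrupt the length field, and VLAN tags on
 * non-checksum-offloaded packets are inserted in software via
 * be_insert_vlan_in_pkt().
 */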
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
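
/* Added note: be_set_rx_mode() resolves the netdev flags into FW filter
 * state in order: full promiscuous first, multicast-promisc when the mcast
 * list exceeds what the FW supports, then per-address unicast and multicast
 * filter programming, falling back to the promiscuous variants whenever HW
 * filters run out.
 */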
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}
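
/* Added note: be_find_vfs() walks the SR-IOV extended capability and the
 * PCI device list to count either all VFs spawned by this PF or only the
 * ones still assigned to a guest (vf_state == ASSIGNED), matching each VF
 * to the PF via pci_physfn().
 */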
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
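
/* Added note on adaptive EQ delay: when AIC is enabled the delay is
 * recomputed at most once a second from the observed RX packet rate
 * (roughly rx_pps / 110000, scaled by 8) and clamped to the EQ's
 * [min_eqd, max_eqd] range; rates below the threshold disable the delay.
 */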
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}
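
/* Added note: each RX buffer is an rx_frag_size slice of a larger "big
 * page" shared by several descriptors.  get_rx_page_info() hands back the
 * slice for a completion fragment and performs the DMA unmap only when the
 * last user of the underlying page releases it (last_page_user).
 */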
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
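
/* Added note: skb_fill_rx_data() copies tiny frames (<= BE_HDR_LEN)
 * entirely into the skb's linear area; for larger frames only the Ethernet
 * header is copied and the remaining data is attached as page fragments,
 * coalescing consecutive fragments that live in the same physical page
 * into one frag slot.
 */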
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
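
/* Added note: be_rx_compl_get() pops the next RX completion, if any.  The
 * 'valid' dword (same position in v0 and v1 layouts) acts as the ownership
 * marker, the rmb() keeps the payload reads from being speculated ahead of
 * the valid check, and the entry is zeroed after parsing so it reads as
 * consumed on the next pass around the ring.
 */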
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
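
/* Added note: be_post_rx_frags() replenishes the RX ring by carving each
 * big page into rx_frag_size chunks, posting up to MAX_RX_POST descriptors
 * per call and marking the final user of each page so the DMA mapping is
 * released exactly once.  If nothing could be posted and the ring is empty,
 * the rx_post_starved flag asks be_worker to retry later.
 */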
1560/*
1561 * Allocate a page, split it to fragments of size rx_frag_size and post as
1562 * receive buffers to BE
1563 */
1829b086 1564static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1565{
3abcdeda 1566 struct be_adapter *adapter = rxo->adapter;
26d92f92 1567 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1568 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1569 struct page *pagep = NULL;
1570 struct be_eth_rx_d *rxd;
1571 u64 page_dmaaddr = 0, frag_dmaaddr;
1572 u32 posted, page_offset = 0;
1573
3abcdeda 1574 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1575 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1576 if (!pagep) {
1829b086 1577 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1578 if (unlikely(!pagep)) {
ac124ff9 1579 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1580 break;
1581 }
2b7bcebf
IV
1582 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1583 0, adapter->big_page_size,
1584 DMA_FROM_DEVICE);
6b7c5b94
SP
1585 page_info->page_offset = 0;
1586 } else {
1587 get_page(pagep);
1588 page_info->page_offset = page_offset + rx_frag_size;
1589 }
1590 page_offset = page_info->page_offset;
1591 page_info->page = pagep;
fac6da5b 1592 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1593 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1594
1595 rxd = queue_head_node(rxq);
1596 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1597 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1598
1599 /* Any space left in the current big page for another frag? */
1600 if ((page_offset + rx_frag_size + rx_frag_size) >
1601 adapter->big_page_size) {
1602 pagep = NULL;
1603 page_info->last_page_user = true;
1604 }
26d92f92
SP
1605
1606 prev_page_info = page_info;
1607 queue_head_inc(rxq);
10ef9ab4 1608 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1609 }
1610 if (pagep)
26d92f92 1611 prev_page_info->last_page_user = true;
6b7c5b94
SP
1612
1613 if (posted) {
6b7c5b94 1614 atomic_add(posted, &rxq->used);
8788fdc2 1615 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1616 } else if (atomic_read(&rxq->used) == 0) {
1617 /* Let be_worker replenish when memory is available */
3abcdeda 1618 rxo->rx_post_starved = true;
6b7c5b94 1619 }
6b7c5b94
SP
1620}
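/* Illustrative note: with the default rx_frag_size of 2048 on a 4K-page
 * system, get_order(2048) == 0 so big_page_size is a single 4K page and
 * each page is carved into exactly two fragments:
 *   frag 0: page_offset = 0,    last_page_user = false
 *   frag 1: page_offset = 2048, last_page_user = true
 * get_page() takes an extra reference for every fragment after the first,
 * and last_page_user marks the point where the DMA mapping of the page
 * can finally be torn down.
 */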
1621
5fb379ee 1622static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1623{
6b7c5b94
SP
1624 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1625
1626 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1627 return NULL;
1628
f3eb62d2 1629 rmb();
6b7c5b94
SP
1630 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1631
1632 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1633
1634 queue_tail_inc(tx_cq);
1635 return txcp;
1636}
1637
3c8def97
SP
1638static u16 be_tx_compl_process(struct be_adapter *adapter,
1639 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1640{
3c8def97 1641 struct be_queue_info *txq = &txo->q;
a73b796e 1642 struct be_eth_wrb *wrb;
3c8def97 1643 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1644 struct sk_buff *sent_skb;
ec43b1a6
SP
1645 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1646 bool unmap_skb_hdr = true;
6b7c5b94 1647
ec43b1a6 1648 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1649 BUG_ON(!sent_skb);
ec43b1a6
SP
1650 sent_skbs[txq->tail] = NULL;
1651
1652 /* skip header wrb */
a73b796e 1653 queue_tail_inc(txq);
6b7c5b94 1654
ec43b1a6 1655 do {
6b7c5b94 1656 cur_index = txq->tail;
a73b796e 1657 wrb = queue_tail_node(txq);
2b7bcebf
IV
1658 unmap_tx_frag(&adapter->pdev->dev, wrb,
1659 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1660 unmap_skb_hdr = false;
1661
6b7c5b94
SP
1662 num_wrbs++;
1663 queue_tail_inc(txq);
ec43b1a6 1664 } while (cur_index != last_index);
6b7c5b94 1665
6b7c5b94 1666 kfree_skb(sent_skb);
4d586b82 1667 return num_wrbs;
6b7c5b94
SP
1668}
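/* Illustrative note: a TX completion names only the last wrb of the skb
 * it acknowledges. Starting from txq->tail (the header wrb, counted by
 * the initial num_wrbs = 1), the loop above unmaps each following wrb up
 * to last_index; e.g. an skb with a linear part and three page frags
 * occupies 1 hdr + 1 + 3 = 5 wrbs, so num_wrbs returned is 5 and the
 * caller can debit txq->used by the same amount.
 */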
1669
10ef9ab4
SP
1670/* Return the number of events in the event queue */
1671static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1672{
10ef9ab4
SP
1673 struct be_eq_entry *eqe;
1674 int num = 0;
859b1e4e 1675
10ef9ab4
SP
1676 do {
1677 eqe = queue_tail_node(&eqo->q);
1678 if (eqe->evt == 0)
1679 break;
859b1e4e 1680
10ef9ab4
SP
1681 rmb();
1682 eqe->evt = 0;
1683 num++;
1684 queue_tail_inc(&eqo->q);
1685 } while (true);
1686
1687 return num;
859b1e4e
SP
1688}
1689
10ef9ab4
SP
1690/* Leaves the EQ in a disarmed state */
1691static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1692{
10ef9ab4 1693 int num = events_get(eqo);
859b1e4e 1694
10ef9ab4 1695 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1696}
1697
10ef9ab4 1698static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1699{
1700 struct be_rx_page_info *page_info;
3abcdeda
SP
1701 struct be_queue_info *rxq = &rxo->q;
1702 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1703 struct be_rx_compl_info *rxcp;
d23e946c
SP
1704 struct be_adapter *adapter = rxo->adapter;
1705 int flush_wait = 0;
6b7c5b94
SP
1706 u16 tail;
1707
d23e946c
SP
1708 /* Consume pending rx completions.
1709 * Wait for the flush completion (identified by zero num_rcvd)
1710 * to arrive. Notify CQ even when there are no more CQ entries
1711 * for HW to flush partially coalesced CQ entries.
1712 * In Lancer, there is no need to wait for flush compl.
1713 */
1714 for (;;) {
1715 rxcp = be_rx_compl_get(rxo);
1716 if (rxcp == NULL) {
1717 if (lancer_chip(adapter))
1718 break;
1719
1720 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1721 dev_warn(&adapter->pdev->dev,
1722 "did not receive flush compl\n");
1723 break;
1724 }
1725 be_cq_notify(adapter, rx_cq->id, true, 0);
1726 mdelay(1);
1727 } else {
1728 be_rx_compl_discard(rxo, rxcp);
1729 be_cq_notify(adapter, rx_cq->id, true, 1);
1730 if (rxcp->num_rcvd == 0)
1731 break;
1732 }
6b7c5b94
SP
1733 }
1734
d23e946c
SP
1735 /* After cleanup, leave the CQ in unarmed state */
1736 be_cq_notify(adapter, rx_cq->id, false, 0);
1737
1738 /* Then free posted rx buffers that were not used */
6b7c5b94 1739 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1740 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1741 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1742 put_page(page_info->page);
1743 memset(page_info, 0, sizeof(*page_info));
1744 }
1745 BUG_ON(atomic_read(&rxq->used));
482c9e79 1746 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1747}
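/* Illustrative note: the tail computation above derives the oldest
 * still-posted buffer from the producer index and the outstanding count:
 *   tail = (head + len - used) % len
 * e.g. head = 10, len = 1024, used = 25 gives tail = 1009; walking 25
 * slots forward (wrapping at 1024) frees entries 1009..1023 and 0..9,
 * leaving rxq->used at zero, which the BUG_ON then asserts.
 */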
1748
0ae57bb3 1749static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1750{
0ae57bb3
SP
1751 struct be_tx_obj *txo;
1752 struct be_queue_info *txq;
a8e9179a 1753 struct be_eth_tx_compl *txcp;
4d586b82 1754 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1755 struct sk_buff *sent_skb;
1756 bool dummy_wrb;
0ae57bb3 1757 int i, pending_txqs;
a8e9179a
SP
1758
1759 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1760 do {
0ae57bb3
SP
1761 pending_txqs = adapter->num_tx_qs;
1762
1763 for_all_tx_queues(adapter, txo, i) {
1764 txq = &txo->q;
1765 while ((txcp = be_tx_compl_get(&txo->cq))) {
1766 end_idx =
1767 AMAP_GET_BITS(struct amap_eth_tx_compl,
1768 wrb_index, txcp);
1769 num_wrbs += be_tx_compl_process(adapter, txo,
1770 end_idx);
1771 cmpl++;
1772 }
1773 if (cmpl) {
1774 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1775 atomic_sub(num_wrbs, &txq->used);
1776 cmpl = 0;
1777 num_wrbs = 0;
1778 }
1779 if (atomic_read(&txq->used) == 0)
1780 pending_txqs--;
a8e9179a
SP
1781 }
1782
0ae57bb3 1783 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1784 break;
1785
1786 mdelay(1);
1787 } while (true);
1788
0ae57bb3
SP
1789 for_all_tx_queues(adapter, txo, i) {
1790 txq = &txo->q;
1791 if (atomic_read(&txq->used))
1792 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1793 atomic_read(&txq->used));
1794
1795 /* free posted tx for which compls will never arrive */
1796 while (atomic_read(&txq->used)) {
1797 sent_skb = txo->sent_skb_list[txq->tail];
1798 end_idx = txq->tail;
1799 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1800 &dummy_wrb);
1801 index_adv(&end_idx, num_wrbs - 1, txq->len);
1802 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1803 atomic_sub(num_wrbs, &txq->used);
1804 }
b03388d6 1805 }
6b7c5b94
SP
1806}
1807
10ef9ab4
SP
1808static void be_evt_queues_destroy(struct be_adapter *adapter)
1809{
1810 struct be_eq_obj *eqo;
1811 int i;
1812
1813 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1814 if (eqo->q.created) {
1815 be_eq_clean(eqo);
10ef9ab4 1816 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1817 }
10ef9ab4
SP
1818 be_queue_free(adapter, &eqo->q);
1819 }
1820}
1821
1822static int be_evt_queues_create(struct be_adapter *adapter)
1823{
1824 struct be_queue_info *eq;
1825 struct be_eq_obj *eqo;
1826 int i, rc;
1827
1828 adapter->num_evt_qs = num_irqs(adapter);
1829
1830 for_all_evt_queues(adapter, eqo, i) {
1831 eqo->adapter = adapter;
1832 eqo->tx_budget = BE_TX_BUDGET;
1833 eqo->idx = i;
1834 eqo->max_eqd = BE_MAX_EQD;
1835 eqo->enable_aic = true;
1836
1837 eq = &eqo->q;
1838 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1839 sizeof(struct be_eq_entry));
1840 if (rc)
1841 return rc;
1842
1843 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1844 if (rc)
1845 return rc;
1846 }
1cfafab9 1847 return 0;
10ef9ab4
SP
1848}
1849
5fb379ee
SP
1850static void be_mcc_queues_destroy(struct be_adapter *adapter)
1851{
1852 struct be_queue_info *q;
5fb379ee 1853
8788fdc2 1854 q = &adapter->mcc_obj.q;
5fb379ee 1855 if (q->created)
8788fdc2 1856 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1857 be_queue_free(adapter, q);
1858
8788fdc2 1859 q = &adapter->mcc_obj.cq;
5fb379ee 1860 if (q->created)
8788fdc2 1861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1862 be_queue_free(adapter, q);
1863}
1864
1865/* Must be called only after TX qs are created as MCC shares TX EQ */
1866static int be_mcc_queues_create(struct be_adapter *adapter)
1867{
1868 struct be_queue_info *q, *cq;
5fb379ee 1869
8788fdc2 1870 cq = &adapter->mcc_obj.cq;
5fb379ee 1871 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1872 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1873 goto err;
1874
10ef9ab4
SP
1875 /* Use the default EQ for MCC completions */
1876 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1877 goto mcc_cq_free;
1878
8788fdc2 1879 q = &adapter->mcc_obj.q;
5fb379ee
SP
1880 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1881 goto mcc_cq_destroy;
1882
8788fdc2 1883 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1884 goto mcc_q_free;
1885
1886 return 0;
1887
1888mcc_q_free:
1889 be_queue_free(adapter, q);
1890mcc_cq_destroy:
8788fdc2 1891 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1892mcc_cq_free:
1893 be_queue_free(adapter, cq);
1894err:
1895 return -1;
1896}
1897
6b7c5b94
SP
1898static void be_tx_queues_destroy(struct be_adapter *adapter)
1899{
1900 struct be_queue_info *q;
3c8def97
SP
1901 struct be_tx_obj *txo;
1902 u8 i;
6b7c5b94 1903
3c8def97
SP
1904 for_all_tx_queues(adapter, txo, i) {
1905 q = &txo->q;
1906 if (q->created)
1907 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1908 be_queue_free(adapter, q);
6b7c5b94 1909
3c8def97
SP
1910 q = &txo->cq;
1911 if (q->created)
1912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1913 be_queue_free(adapter, q);
1914 }
6b7c5b94
SP
1915}
1916
dafc0fe3
SP
1917static int be_num_txqs_want(struct be_adapter *adapter)
1918{
abb93951
PR
1919 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1920 be_is_mc(adapter) ||
1921 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1922 BE2_chip(adapter))
dafc0fe3
SP
1923 return 1;
1924 else
abb93951 1925 return adapter->max_tx_queues;
dafc0fe3
SP
1926}
1927
10ef9ab4 1928static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1929{
10ef9ab4
SP
1930 struct be_queue_info *cq, *eq;
1931 int status;
3c8def97
SP
1932 struct be_tx_obj *txo;
1933 u8 i;
6b7c5b94 1934
dafc0fe3 1935 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1936 if (adapter->num_tx_qs != MAX_TX_QS) {
1937 rtnl_lock();
dafc0fe3
SP
1938 netif_set_real_num_tx_queues(adapter->netdev,
1939 adapter->num_tx_qs);
3bb62f4f
PR
1940 rtnl_unlock();
1941 }
dafc0fe3 1942
10ef9ab4
SP
1943 for_all_tx_queues(adapter, txo, i) {
1944 cq = &txo->cq;
1945 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1946 sizeof(struct be_eth_tx_compl));
1947 if (status)
1948 return status;
3c8def97 1949
10ef9ab4
SP
1950 /* If num_evt_qs is less than num_tx_qs, then more than
1951 * one txq shares an eq
1952 */
1953 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1954 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1955 if (status)
1956 return status;
1957 }
1958 return 0;
1959}
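/* Illustrative note: the i % num_evt_qs mapping above spreads TX CQs
 * round-robin over the EQs; e.g. with 8 TX queues and 4 event queues,
 * txq0/txq4 land on eq0, txq1/txq5 on eq1, and so on. be_poll() walks
 * the same stride (i += num_evt_qs), so each EQ services exactly the
 * TXQs mapped onto it.
 */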
6b7c5b94 1960
10ef9ab4
SP
1961static int be_tx_qs_create(struct be_adapter *adapter)
1962{
1963 struct be_tx_obj *txo;
1964 int i, status;
fe6d2a38 1965
3c8def97 1966 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1967 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1968 sizeof(struct be_eth_wrb));
1969 if (status)
1970 return status;
6b7c5b94 1971
10ef9ab4
SP
1972 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1973 if (status)
1974 return status;
3c8def97 1975 }
6b7c5b94 1976
d379142b
SP
1977 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1978 adapter->num_tx_qs);
10ef9ab4 1979 return 0;
6b7c5b94
SP
1980}
1981
10ef9ab4 1982static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1983{
1984 struct be_queue_info *q;
3abcdeda
SP
1985 struct be_rx_obj *rxo;
1986 int i;
1987
1988 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1989 q = &rxo->cq;
1990 if (q->created)
1991 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1992 be_queue_free(adapter, q);
ac6a0c4a
SP
1993 }
1994}
1995
10ef9ab4 1996static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1997{
10ef9ab4 1998 struct be_queue_info *eq, *cq;
3abcdeda
SP
1999 struct be_rx_obj *rxo;
2000 int rc, i;
6b7c5b94 2001
10ef9ab4
SP
2002 /* We'll create as many RSS rings as there are irqs.
2003 * But when there's only one irq there's no use creating RSS rings
2004 */
2005 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2006 num_irqs(adapter) + 1 : 1;
7f640062
SP
2007 if (adapter->num_rx_qs != MAX_RX_QS) {
2008 rtnl_lock();
2009 netif_set_real_num_rx_queues(adapter->netdev,
2010 adapter->num_rx_qs);
2011 rtnl_unlock();
2012 }
ac6a0c4a 2013
6b7c5b94 2014 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2015 for_all_rx_queues(adapter, rxo, i) {
2016 rxo->adapter = adapter;
3abcdeda
SP
2017 cq = &rxo->cq;
2018 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2019 sizeof(struct be_eth_rx_compl));
2020 if (rc)
10ef9ab4 2021 return rc;
3abcdeda 2022
10ef9ab4
SP
2023 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2024 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2025 if (rc)
10ef9ab4 2026 return rc;
3abcdeda 2027 }
6b7c5b94 2028
d379142b
SP
2029 dev_info(&adapter->pdev->dev,
2030 "created %d RSS queue(s) and 1 default RX queue\n",
2031 adapter->num_rx_qs - 1);
10ef9ab4 2032 return 0;
b628bde2
SP
2033}
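/* Illustrative note: with N > 1 MSI-x vectors the driver creates N RSS
 * rings plus one default RX queue (num_rx_qs = N + 1); with a single
 * vector only the default queue exists. Because of the i % num_evt_qs
 * mapping, the extra default RXQ shares EQ0, which is why be_poll() for
 * EQ0 runs its RX loop one extra iteration.
 */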
2034
6b7c5b94
SP
2035static irqreturn_t be_intx(int irq, void *dev)
2036{
e49cc34f
SP
2037 struct be_eq_obj *eqo = dev;
2038 struct be_adapter *adapter = eqo->adapter;
2039 int num_evts = 0;
6b7c5b94 2040
d0b9cec3
SP
2041 /* IRQ is not expected when NAPI is scheduled as the EQ
2042 * will not be armed.
2043 * But this can happen on Lancer INTx where it takes
2044 * a while to de-assert INTx, or in BE2 where occasionally
2045 * an interrupt may be raised even when EQ is unarmed.
2046 * If NAPI is already scheduled, then counting & notifying
2047 * events will orphan them.
e49cc34f 2048 */
d0b9cec3 2049 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2050 num_evts = events_get(eqo);
d0b9cec3
SP
2051 __napi_schedule(&eqo->napi);
2052 if (num_evts)
2053 eqo->spurious_intr = 0;
2054 }
2055 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2056
d0b9cec3
SP
2057 /* Return IRQ_HANDLED only for the first spurious intr
2058 * after a valid intr to stop the kernel from branding
2059 * this irq as a bad one!
e49cc34f 2060 */
d0b9cec3
SP
2061 if (num_evts || eqo->spurious_intr++ == 0)
2062 return IRQ_HANDLED;
2063 else
2064 return IRQ_NONE;
6b7c5b94
SP
2065}
2066
10ef9ab4 2067static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2068{
10ef9ab4 2069 struct be_eq_obj *eqo = dev;
6b7c5b94 2070
0b545a62
SP
2071 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2072 napi_schedule(&eqo->napi);
6b7c5b94
SP
2073 return IRQ_HANDLED;
2074}
2075
2e588f84 2076static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2077{
2e588f84 2078 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2079}
2080
10ef9ab4
SP
2081static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2082 int budget)
6b7c5b94 2083{
3abcdeda
SP
2084 struct be_adapter *adapter = rxo->adapter;
2085 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2086 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2087 u32 work_done;
2088
2089 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2090 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2091 if (!rxcp)
2092 break;
2093
12004ae9
SP
2094 /* Is it a flush compl that has no data? */
2095 if (unlikely(rxcp->num_rcvd == 0))
2096 goto loop_continue;
2097
2098 /* Discard compl with partial DMA Lancer B0 */
2099 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2100 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2101 goto loop_continue;
2102 }
2103
2104 /* On BE drop pkts that arrive due to imperfect filtering in
2105 * promiscuous mode on some SKUs
2106 */
2107 if (unlikely(rxcp->port != adapter->port_num &&
2108 !lancer_chip(adapter))) {
10ef9ab4 2109 be_rx_compl_discard(rxo, rxcp);
12004ae9 2110 goto loop_continue;
64642811 2111 }
009dd872 2112
12004ae9 2113 if (do_gro(rxcp))
10ef9ab4 2114 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2115 else
10ef9ab4 2116 be_rx_compl_process(rxo, rxcp);
12004ae9 2117loop_continue:
2e588f84 2118 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2119 }
2120
10ef9ab4
SP
2121 if (work_done) {
2122 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2123
10ef9ab4
SP
2124 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2125 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2126 }
10ef9ab4 2127
6b7c5b94
SP
2128 return work_done;
2129}
2130
10ef9ab4
SP
2131static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2132 int budget, int idx)
6b7c5b94 2133{
6b7c5b94 2134 struct be_eth_tx_compl *txcp;
10ef9ab4 2135 int num_wrbs = 0, work_done;
3c8def97 2136
10ef9ab4
SP
2137 for (work_done = 0; work_done < budget; work_done++) {
2138 txcp = be_tx_compl_get(&txo->cq);
2139 if (!txcp)
2140 break;
2141 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2142 AMAP_GET_BITS(struct amap_eth_tx_compl,
2143 wrb_index, txcp));
10ef9ab4 2144 }
6b7c5b94 2145
10ef9ab4
SP
2146 if (work_done) {
2147 be_cq_notify(adapter, txo->cq.id, true, work_done);
2148 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2149
10ef9ab4
SP
2150 /* As Tx wrbs have been freed up, wake up netdev queue
2151 * if it was stopped due to lack of tx wrbs. */
2152 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2153 atomic_read(&txo->q.used) < txo->q.len / 2) {
2154 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2155 }
10ef9ab4
SP
2156
2157 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2158 tx_stats(txo)->tx_compl += work_done;
2159 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2160 }
10ef9ab4
SP
2161 return (work_done < budget); /* Done */
2162}
6b7c5b94 2163
10ef9ab4
SP
2164int be_poll(struct napi_struct *napi, int budget)
2165{
2166 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2167 struct be_adapter *adapter = eqo->adapter;
0b545a62 2168 int max_work = 0, work, i, num_evts;
10ef9ab4 2169 bool tx_done;
f31e50a8 2170
0b545a62
SP
2171 num_evts = events_get(eqo);
2172
10ef9ab4
SP
2173 /* Process all TXQs serviced by this EQ */
2174 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2175 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2176 eqo->tx_budget, i);
2177 if (!tx_done)
2178 max_work = budget;
f31e50a8
SP
2179 }
2180
10ef9ab4
SP
2181 /* This loop will iterate twice for EQ0 in which
2182 * completions of the last RXQ (default one) are also processed.
2183 * For other EQs the loop iterates only once.
2184 */
2185 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2186 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2187 max_work = max(work, max_work);
2188 }
6b7c5b94 2189
10ef9ab4
SP
2190 if (is_mcc_eqo(eqo))
2191 be_process_mcc(adapter);
93c86700 2192
10ef9ab4
SP
2193 if (max_work < budget) {
2194 napi_complete(napi);
0b545a62 2195 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2196 } else {
2197 /* As we'll continue in polling mode, count and clear events */
0b545a62 2198 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2199 }
10ef9ab4 2200 return max_work;
6b7c5b94
SP
2201}
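/* Illustrative note: be_poll() implements the standard NAPI contract:
 * if all rings finished under budget, napi_complete() is called and the
 * EQ is re-armed while acking num_evts consumed entries; otherwise the
 * events are acked with the EQ left unarmed, so the kernel keeps calling
 * be_poll() instead of taking another interrupt. A TX ring that did not
 * finish within tx_budget forces max_work = budget for the same reason.
 */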
2202
f67ef7ba 2203void be_detect_error(struct be_adapter *adapter)
7c185276 2204{
e1cfb67a
PR
2205 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2206 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2207 u32 i;
2208
d23e946c 2209 if (be_hw_error(adapter))
72f02485
SP
2210 return;
2211
e1cfb67a
PR
2212 if (lancer_chip(adapter)) {
2213 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2214 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2215 sliport_err1 = ioread32(adapter->db +
2216 SLIPORT_ERROR1_OFFSET);
2217 sliport_err2 = ioread32(adapter->db +
2218 SLIPORT_ERROR2_OFFSET);
2219 }
2220 } else {
2221 pci_read_config_dword(adapter->pdev,
2222 PCICFG_UE_STATUS_LOW, &ue_lo);
2223 pci_read_config_dword(adapter->pdev,
2224 PCICFG_UE_STATUS_HIGH, &ue_hi);
2225 pci_read_config_dword(adapter->pdev,
2226 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2227 pci_read_config_dword(adapter->pdev,
2228 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2229
f67ef7ba
PR
2230 ue_lo = (ue_lo & ~ue_lo_mask);
2231 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2232 }
7c185276 2233
1451ae6e
AK
2234 /* On certain platforms BE hardware can indicate spurious UEs.
2235 * Allow the h/w to stop working completely in case of a real UE.
2236 * Hence hw_error is not set for UE detection.
2237 */
2238 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2239 adapter->hw_error = true;
434b3648 2240 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2241 "Error detected in the card\n");
2242 }
2243
2244 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2245 dev_err(&adapter->pdev->dev,
2246 "ERR: sliport status 0x%x\n", sliport_status);
2247 dev_err(&adapter->pdev->dev,
2248 "ERR: sliport error1 0x%x\n", sliport_err1);
2249 dev_err(&adapter->pdev->dev,
2250 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2251 }
2252
e1cfb67a
PR
2253 if (ue_lo) {
2254 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2255 if (ue_lo & 1)
7c185276
AK
2256 dev_err(&adapter->pdev->dev,
2257 "UE: %s bit set\n", ue_status_low_desc[i]);
2258 }
2259 }
f67ef7ba 2260
e1cfb67a
PR
2261 if (ue_hi) {
2262 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2263 if (ue_hi & 1)
7c185276
AK
2264 dev_err(&adapter->pdev->dev,
2265 "UE: %s bit set\n", ue_status_hi_desc[i]);
2266 }
2267 }
2268
2269}
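/* Illustrative note: the UE decode loops walk each unmasked status word
 * one bit at a time, shifting right per iteration; bit i set means entry
 * i of ue_status_low_desc[]/ue_status_hi_desc[] names the failing block.
 * E.g. ue_lo = 0x5 has bits 0 and 2 set and reports the first and third
 * entries of the low table.
 */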
2270
8d56ff11
SP
2271static void be_msix_disable(struct be_adapter *adapter)
2272{
ac6a0c4a 2273 if (msix_enabled(adapter)) {
8d56ff11 2274 pci_disable_msix(adapter->pdev);
ac6a0c4a 2275 adapter->num_msix_vec = 0;
3abcdeda
SP
2276 }
2277}
2278
10ef9ab4
SP
2279static uint be_num_rss_want(struct be_adapter *adapter)
2280{
30e80b55 2281 u32 num = 0;
abb93951 2282
10ef9ab4 2283 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2284 (lancer_chip(adapter) ||
2285 (!sriov_want(adapter) && be_physfn(adapter)))) {
2286 num = adapter->max_rss_queues;
30e80b55
YM
2287 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2288 }
2289 return num;
10ef9ab4
SP
2290}
2291
6b7c5b94
SP
2292static void be_msix_enable(struct be_adapter *adapter)
2293{
10ef9ab4 2294#define BE_MIN_MSIX_VECTORS 1
045508a8 2295 int i, status, num_vec, num_roce_vec = 0;
d379142b 2296 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2297
10ef9ab4
SP
2298 /* If RSS queues are not used, need a vec for default RX Q */
2299 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2300 if (be_roce_supported(adapter)) {
2301 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2302 (num_online_cpus() + 1));
2303 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2304 num_vec += num_roce_vec;
2305 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2306 }
10ef9ab4 2307 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2308
ac6a0c4a 2309 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2310 adapter->msix_entries[i].entry = i;
2311
ac6a0c4a 2312 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2313 if (status == 0) {
2314 goto done;
2315 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2316 num_vec = status;
3abcdeda 2317 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2318 num_vec) == 0)
3abcdeda 2319 goto done;
3abcdeda 2320 }
d379142b
SP
2321
2322 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2323 return;
2324done:
045508a8
PP
2325 if (be_roce_supported(adapter)) {
2326 if (num_vec > num_roce_vec) {
2327 adapter->num_msix_vec = num_vec - num_roce_vec;
2328 adapter->num_msix_roce_vec =
2329 num_vec - adapter->num_msix_vec;
2330 } else {
2331 adapter->num_msix_vec = num_vec;
2332 adapter->num_msix_roce_vec = 0;
2333 }
2334 } else
2335 adapter->num_msix_vec = num_vec;
d379142b 2336 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2337 return;
6b7c5b94
SP
2338}
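/* Illustrative note: this relies on the legacy pci_enable_msix() retry
 * convention: 0 means all requested vectors were granted, a positive
 * return is the number of vectors the platform can actually provide (so
 * the request is retried with that count), and a negative return is a
 * hard failure, after which be_irq_register() falls back to INTx.
 */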
2339
fe6d2a38 2340static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2341 struct be_eq_obj *eqo)
b628bde2 2342{
10ef9ab4 2343 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2344}
6b7c5b94 2345
b628bde2
SP
2346static int be_msix_register(struct be_adapter *adapter)
2347{
10ef9ab4
SP
2348 struct net_device *netdev = adapter->netdev;
2349 struct be_eq_obj *eqo;
2350 int status, i, vec;
6b7c5b94 2351
10ef9ab4
SP
2352 for_all_evt_queues(adapter, eqo, i) {
2353 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2354 vec = be_msix_vec_get(adapter, eqo);
2355 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2356 if (status)
2357 goto err_msix;
2358 }
b628bde2 2359
6b7c5b94 2360 return 0;
3abcdeda 2361err_msix:
10ef9ab4
SP
2362 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2363 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2364 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2365 status);
ac6a0c4a 2366 be_msix_disable(adapter);
6b7c5b94
SP
2367 return status;
2368}
2369
2370static int be_irq_register(struct be_adapter *adapter)
2371{
2372 struct net_device *netdev = adapter->netdev;
2373 int status;
2374
ac6a0c4a 2375 if (msix_enabled(adapter)) {
6b7c5b94
SP
2376 status = be_msix_register(adapter);
2377 if (status == 0)
2378 goto done;
ba343c77
SB
2379 /* INTx is not supported for VF */
2380 if (!be_physfn(adapter))
2381 return status;
6b7c5b94
SP
2382 }
2383
e49cc34f 2384 /* INTx: only the first EQ is used */
6b7c5b94
SP
2385 netdev->irq = adapter->pdev->irq;
2386 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2387 &adapter->eq_obj[0]);
6b7c5b94
SP
2388 if (status) {
2389 dev_err(&adapter->pdev->dev,
2390 "INTx request IRQ failed - err %d\n", status);
2391 return status;
2392 }
2393done:
2394 adapter->isr_registered = true;
2395 return 0;
2396}
2397
2398static void be_irq_unregister(struct be_adapter *adapter)
2399{
2400 struct net_device *netdev = adapter->netdev;
10ef9ab4 2401 struct be_eq_obj *eqo;
3abcdeda 2402 int i;
6b7c5b94
SP
2403
2404 if (!adapter->isr_registered)
2405 return;
2406
2407 /* INTx */
ac6a0c4a 2408 if (!msix_enabled(adapter)) {
e49cc34f 2409 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2410 goto done;
2411 }
2412
2413 /* MSIx */
10ef9ab4
SP
2414 for_all_evt_queues(adapter, eqo, i)
2415 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2416
6b7c5b94
SP
2417done:
2418 adapter->isr_registered = false;
6b7c5b94
SP
2419}
2420
10ef9ab4 2421static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2422{
2423 struct be_queue_info *q;
2424 struct be_rx_obj *rxo;
2425 int i;
2426
2427 for_all_rx_queues(adapter, rxo, i) {
2428 q = &rxo->q;
2429 if (q->created) {
2430 be_cmd_rxq_destroy(adapter, q);
2431 /* After the rxq is invalidated, wait for a grace time
2432 * of 1ms for all dma to end and the flush compl to
2433 * arrive
2434 */
2435 mdelay(1);
10ef9ab4 2436 be_rx_cq_clean(rxo);
482c9e79 2437 }
10ef9ab4 2438 be_queue_free(adapter, q);
482c9e79
SP
2439 }
2440}
2441
889cd4b2
SP
2442static int be_close(struct net_device *netdev)
2443{
2444 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2445 struct be_eq_obj *eqo;
2446 int i;
889cd4b2 2447
045508a8
PP
2448 be_roce_dev_close(adapter);
2449
a323d9bf 2450 for_all_evt_queues(adapter, eqo, i)
10ef9ab4 2451 napi_disable(&eqo->napi);
a323d9bf
SP
2452
2453 be_async_mcc_disable(adapter);
2454
2455 /* Wait for all pending tx completions to arrive so that
2456 * all tx skbs are freed.
2457 */
2458 be_tx_compl_clean(adapter);
2459
2460 be_rx_qs_destroy(adapter);
2461
2462 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2463 if (msix_enabled(adapter))
2464 synchronize_irq(be_msix_vec_get(adapter, eqo));
2465 else
2466 synchronize_irq(netdev->irq);
2467 be_eq_clean(eqo);
63fcb27f
PR
2468 }
2469
889cd4b2
SP
2470 be_irq_unregister(adapter);
2471
482c9e79
SP
2472 return 0;
2473}
2474
10ef9ab4 2475static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2476{
2477 struct be_rx_obj *rxo;
e9008ee9
PR
2478 int rc, i, j;
2479 u8 rsstable[128];
482c9e79
SP
2480
2481 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2482 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2483 sizeof(struct be_eth_rx_d));
2484 if (rc)
2485 return rc;
2486 }
2487
2488 /* The FW would like the default RXQ to be created first */
2489 rxo = default_rxo(adapter);
2490 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2491 adapter->if_handle, false, &rxo->rss_id);
2492 if (rc)
2493 return rc;
2494
2495 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2496 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2497 rx_frag_size, adapter->if_handle,
2498 true, &rxo->rss_id);
482c9e79
SP
2499 if (rc)
2500 return rc;
2501 }
2502
2503 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2504 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2505 for_all_rss_queues(adapter, rxo, i) {
2506 if ((j + i) >= 128)
2507 break;
2508 rsstable[j + i] = rxo->rss_id;
2509 }
2510 }
2511 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2512 if (rc)
2513 return rc;
2514 }
2515
2516 /* First time posting */
10ef9ab4 2517 for_all_rx_queues(adapter, rxo, i)
482c9e79 2518 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2519 return 0;
2520}
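/* Illustrative note: the 128-entry RSS indirection table is filled in
 * blocks of (num_rx_qs - 1), each block listing the RSS rings' rss_ids
 * in order; e.g. with 4 RSS rings entries 0..3, 4..7, ... repeat the
 * same four ids, spreading flow hashes evenly across the rings. The
 * default RXQ is created with RSS disabled and never appears in the
 * table.
 */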
2521
6b7c5b94
SP
2522static int be_open(struct net_device *netdev)
2523{
2524 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2525 struct be_eq_obj *eqo;
3abcdeda 2526 struct be_rx_obj *rxo;
10ef9ab4 2527 struct be_tx_obj *txo;
b236916a 2528 u8 link_status;
3abcdeda 2529 int status, i;
5fb379ee 2530
10ef9ab4 2531 status = be_rx_qs_create(adapter);
482c9e79
SP
2532 if (status)
2533 goto err;
2534
5fb379ee
SP
2535 be_irq_register(adapter);
2536
10ef9ab4 2537 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2538 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2539
10ef9ab4
SP
2540 for_all_tx_queues(adapter, txo, i)
2541 be_cq_notify(adapter, txo->cq.id, true, 0);
2542
7a1e9b20
SP
2543 be_async_mcc_enable(adapter);
2544
10ef9ab4
SP
2545 for_all_evt_queues(adapter, eqo, i) {
2546 napi_enable(&eqo->napi);
2547 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2548 }
2549
323ff71e 2550 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2551 if (!status)
2552 be_link_status_update(adapter, link_status);
2553
045508a8 2554 be_roce_dev_open(adapter);
889cd4b2
SP
2555 return 0;
2556err:
2557 be_close(adapter->netdev);
2558 return -EIO;
5fb379ee
SP
2559}
2560
71d8d1b5
AK
2561static int be_setup_wol(struct be_adapter *adapter, bool enable)
2562{
2563 struct be_dma_mem cmd;
2564 int status = 0;
2565 u8 mac[ETH_ALEN];
2566
2567 memset(mac, 0, ETH_ALEN);
2568
2569 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2570 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2571 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2572 if (cmd.va == NULL)
2573 return -1;
71d8d1b5
AK
2574
2575 if (enable) {
2576 status = pci_write_config_dword(adapter->pdev,
2577 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2578 if (status) {
2579 dev_err(&adapter->pdev->dev,
2381a55c 2580 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2581 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2582 cmd.dma);
71d8d1b5
AK
2583 return status;
2584 }
2585 status = be_cmd_enable_magic_wol(adapter,
2586 adapter->netdev->dev_addr, &cmd);
2587 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2588 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2589 } else {
2590 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2591 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2592 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2593 }
2594
2b7bcebf 2595 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2596 return status;
2597}
2598
6d87f5c3
AK
2599/*
2600 * Generate a seed MAC address from the PF MAC Address using jhash.
2601 * MAC addresses for VFs are assigned incrementally starting from the seed.
2602 * These addresses are programmed in the ASIC by the PF and the VF driver
2603 * queries for the MAC address during its probe.
2604 */
4c876616 2605static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2606{
f9449ab7 2607 u32 vf;
3abcdeda 2608 int status = 0;
6d87f5c3 2609 u8 mac[ETH_ALEN];
11ac75ed 2610 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2611
2612 be_vf_eth_addr_generate(adapter, mac);
2613
11ac75ed 2614 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2615 if (lancer_chip(adapter)) {
2616 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2617 } else {
2618 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2619 vf_cfg->if_handle,
2620 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2621 }
2622
6d87f5c3
AK
2623 if (status)
2624 dev_err(&adapter->pdev->dev,
590c391d 2625 "MAC address assignment failed for VF %d\n", vf);
6d87f5c3 2626 else
11ac75ed 2627 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2628
2629 mac[5] += 1;
2630 }
2631 return status;
2632}
2633
4c876616
SP
2634static int be_vfs_mac_query(struct be_adapter *adapter)
2635{
2636 int status, vf;
2637 u8 mac[ETH_ALEN];
2638 struct be_vf_cfg *vf_cfg;
2639 bool active;
2640
2641 for_all_vfs(adapter, vf_cfg, vf) {
2642 be_cmd_get_mac_from_list(adapter, mac, &active,
2643 &vf_cfg->pmac_id, 0);
2644
2645 status = be_cmd_mac_addr_query(adapter, mac, false,
2646 vf_cfg->if_handle, 0);
2647 if (status)
2648 return status;
2649 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2650 }
2651 return 0;
2652}
2653
f9449ab7 2654static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2655{
11ac75ed 2656 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2657 u32 vf;
2658
39f1d94d 2659 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2660 dev_warn(&adapter->pdev->dev,
2661 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2662 goto done;
2663 }
2664
11ac75ed 2665 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2666 if (lancer_chip(adapter))
2667 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2668 else
11ac75ed
SP
2669 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2670 vf_cfg->pmac_id, vf + 1);
f9449ab7 2671
11ac75ed
SP
2672 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2673 }
39f1d94d
SP
2674 pci_disable_sriov(adapter->pdev);
2675done:
2676 kfree(adapter->vf_cfg);
2677 adapter->num_vfs = 0;
6d87f5c3
AK
2678}
2679
a54769f5
SP
2680static int be_clear(struct be_adapter *adapter)
2681{
fbc13f01
AK
2682 int i = 1;
2683
191eb756
SP
2684 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2685 cancel_delayed_work_sync(&adapter->work);
2686 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2687 }
2688
11ac75ed 2689 if (sriov_enabled(adapter))
f9449ab7
SP
2690 be_vf_clear(adapter);
2691
fbc13f01
AK
2692 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2693 be_cmd_pmac_del(adapter, adapter->if_handle,
2694 adapter->pmac_id[i], 0);
2695
f9449ab7 2696 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2697
2698 be_mcc_queues_destroy(adapter);
10ef9ab4 2699 be_rx_cqs_destroy(adapter);
a54769f5 2700 be_tx_queues_destroy(adapter);
10ef9ab4 2701 be_evt_queues_destroy(adapter);
a54769f5 2702
abb93951
PR
2703 kfree(adapter->pmac_id);
2704 adapter->pmac_id = NULL;
2705
10ef9ab4 2706 be_msix_disable(adapter);
a54769f5
SP
2707 return 0;
2708}
2709
4c876616 2710static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2711{
4c876616
SP
2712 struct be_vf_cfg *vf_cfg;
2713 u32 cap_flags, en_flags, vf;
abb93951
PR
2714 int status;
2715
4c876616
SP
2716 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2717 BE_IF_FLAGS_MULTICAST;
abb93951 2718
4c876616
SP
2719 for_all_vfs(adapter, vf_cfg, vf) {
2720 if (!BE3_chip(adapter))
2721 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2722
2723 /* If a FW profile exists, then cap_flags are updated */
2724 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2725 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2726 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2727 &vf_cfg->if_handle, vf + 1);
2728 if (status)
2729 goto err;
2730 }
2731err:
2732 return status;
abb93951
PR
2733}
2734
39f1d94d 2735static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2736{
11ac75ed 2737 struct be_vf_cfg *vf_cfg;
30128031
SP
2738 int vf;
2739
39f1d94d
SP
2740 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2741 GFP_KERNEL);
2742 if (!adapter->vf_cfg)
2743 return -ENOMEM;
2744
11ac75ed
SP
2745 for_all_vfs(adapter, vf_cfg, vf) {
2746 vf_cfg->if_handle = -1;
2747 vf_cfg->pmac_id = -1;
30128031 2748 }
39f1d94d 2749 return 0;
30128031
SP
2750}
2751
f9449ab7
SP
2752static int be_vf_setup(struct be_adapter *adapter)
2753{
11ac75ed 2754 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2755 u16 def_vlan, lnk_speed;
4c876616
SP
2756 int status, old_vfs, vf;
2757 struct device *dev = &adapter->pdev->dev;
39f1d94d 2758
4c876616
SP
2759 old_vfs = be_find_vfs(adapter, ENABLED);
2760 if (old_vfs) {
2761 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2762 if (old_vfs != num_vfs)
2763 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2764 adapter->num_vfs = old_vfs;
39f1d94d 2765 } else {
4c876616
SP
2766 if (num_vfs > adapter->dev_num_vfs)
2767 dev_info(dev, "Device supports %d VFs and not %d\n",
2768 adapter->dev_num_vfs, num_vfs);
2769 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2770
2771 status = pci_enable_sriov(adapter->pdev, num_vfs);
2772 if (status) {
2773 dev_err(dev, "SRIOV enable failed\n");
2774 adapter->num_vfs = 0;
2775 return 0;
2776 }
39f1d94d
SP
2777 }
2778
2779 status = be_vf_setup_init(adapter);
2780 if (status)
2781 goto err;
30128031 2782
4c876616
SP
2783 if (old_vfs) {
2784 for_all_vfs(adapter, vf_cfg, vf) {
2785 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2786 if (status)
2787 goto err;
2788 }
2789 } else {
2790 status = be_vfs_if_create(adapter);
f9449ab7
SP
2791 if (status)
2792 goto err;
f9449ab7
SP
2793 }
2794
4c876616
SP
2795 if (old_vfs) {
2796 status = be_vfs_mac_query(adapter);
2797 if (status)
2798 goto err;
2799 } else {
39f1d94d
SP
2800 status = be_vf_eth_addr_config(adapter);
2801 if (status)
2802 goto err;
2803 }
f9449ab7 2804
11ac75ed 2805 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2806 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2807 * Allow full available bandwidth
2808 */
2809 if (BE3_chip(adapter) && !old_vfs)
2810 be_cmd_set_qos(adapter, 1000, vf+1);
2811
2812 status = be_cmd_link_status_query(adapter, &lnk_speed,
2813 NULL, vf + 1);
2814 if (!status)
2815 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2816
2817 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2818 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2819 if (status)
2820 goto err;
2821 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2822
2823 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7
SP
2824 }
2825 return 0;
2826err:
4c876616
SP
2827 dev_err(dev, "VF setup failed\n");
2828 be_vf_clear(adapter);
f9449ab7
SP
2829 return status;
2830}
2831
30128031
SP
2832static void be_setup_init(struct be_adapter *adapter)
2833{
2834 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2835 adapter->phy.link_speed = -1;
30128031
SP
2836 adapter->if_handle = -1;
2837 adapter->be3_native = false;
2838 adapter->promiscuous = false;
f25b119c
PR
2839 if (be_physfn(adapter))
2840 adapter->cmd_privileges = MAX_PRIVILEGES;
2841 else
2842 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2843}
2844
1578e777
PR
2845static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2846 bool *active_mac, u32 *pmac_id)
590c391d 2847{
1578e777 2848 int status = 0;
e5e1ee89 2849
1578e777
PR
2850 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2851 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2852 if (!lancer_chip(adapter) && !be_physfn(adapter))
2853 *active_mac = true;
2854 else
2855 *active_mac = false;
e5e1ee89 2856
1578e777
PR
2857 return status;
2858 }
e5e1ee89 2859
1578e777
PR
2860 if (lancer_chip(adapter)) {
2861 status = be_cmd_get_mac_from_list(adapter, mac,
2862 active_mac, pmac_id, 0);
2863 if (*active_mac) {
5ee4979b
SP
2864 status = be_cmd_mac_addr_query(adapter, mac, false,
2865 if_handle, *pmac_id);
1578e777
PR
2866 }
2867 } else if (be_physfn(adapter)) {
2868 /* For BE3, for PF get permanent MAC */
5ee4979b 2869 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2870 *active_mac = false;
e5e1ee89 2871 } else {
1578e777 2872 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 2873 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2874 if_handle, 0);
2875 *active_mac = true;
e5e1ee89 2876 }
590c391d
PR
2877 return status;
2878}
2879
abb93951
PR
2880static void be_get_resources(struct be_adapter *adapter)
2881{
4c876616
SP
2882 u16 dev_num_vfs;
2883 int pos, status;
abb93951
PR
2884 bool profile_present = false;
2885
4c876616 2886 if (!BEx_chip(adapter)) {
abb93951 2887 status = be_cmd_get_func_config(adapter);
abb93951
PR
2888 if (!status)
2889 profile_present = true;
2890 }
2891
2892 if (profile_present) {
2893 /* Sanity fixes for Lancer */
2894 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2895 BE_UC_PMAC_COUNT);
2896 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2897 BE_NUM_VLANS_SUPPORTED);
2898 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2899 BE_MAX_MC);
2900 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2901 MAX_TX_QS);
2902 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2903 BE3_MAX_RSS_QS);
2904 adapter->max_event_queues = min_t(u16,
2905 adapter->max_event_queues,
2906 BE3_MAX_RSS_QS);
2907
2908 if (adapter->max_rss_queues &&
2909 adapter->max_rss_queues == adapter->max_rx_queues)
2910 adapter->max_rss_queues -= 1;
2911
2912 if (adapter->max_event_queues < adapter->max_rss_queues)
2913 adapter->max_rss_queues = adapter->max_event_queues;
2914
2915 } else {
2916 if (be_physfn(adapter))
2917 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2918 else
2919 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2920
2921 if (adapter->function_mode & FLEX10_MODE)
2922 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2923 else
2924 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2925
2926 adapter->max_mcast_mac = BE_MAX_MC;
2927 adapter->max_tx_queues = MAX_TX_QS;
2928 adapter->max_rss_queues = (adapter->be3_native) ?
2929 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2930 adapter->max_event_queues = BE3_MAX_RSS_QS;
2931
2932 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2933 BE_IF_FLAGS_BROADCAST |
2934 BE_IF_FLAGS_MULTICAST |
2935 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2936 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2937 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2938 BE_IF_FLAGS_PROMISCUOUS;
2939
2940 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2941 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2942 }
4c876616
SP
2943
2944 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2945 if (pos) {
2946 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2947 &dev_num_vfs);
2948 if (BE3_chip(adapter))
2949 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2950 adapter->dev_num_vfs = dev_num_vfs;
2951 }
abb93951
PR
2952}
2953
39f1d94d
SP
2954/* Routine to query per-function resource limits */
2955static int be_get_config(struct be_adapter *adapter)
2956{
4c876616 2957 int status;
39f1d94d 2958
abb93951
PR
2959 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2960 &adapter->function_mode,
2961 &adapter->function_caps);
2962 if (status)
2963 goto err;
2964
2965 be_get_resources(adapter);
2966
2967 /* primary mac needs 1 pmac entry */
2968 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2969 sizeof(u32), GFP_KERNEL);
2970 if (!adapter->pmac_id) {
2971 status = -ENOMEM;
2972 goto err;
2973 }
2974
abb93951
PR
2975err:
2976 return status;
39f1d94d
SP
2977}
2978
5fb379ee
SP
2979static int be_setup(struct be_adapter *adapter)
2980{
39f1d94d 2981 struct device *dev = &adapter->pdev->dev;
abb93951 2982 u32 en_flags;
a54769f5 2983 u32 tx_fc, rx_fc;
10ef9ab4 2984 int status;
ba343c77 2985 u8 mac[ETH_ALEN];
1578e777 2986 bool active_mac;
ba343c77 2987
30128031 2988 be_setup_init(adapter);
6b7c5b94 2989
abb93951
PR
2990 if (!lancer_chip(adapter))
2991 be_cmd_req_native_mode(adapter);
39f1d94d 2992
abb93951
PR
2993 status = be_get_config(adapter);
2994 if (status)
2995 goto err;
73d540f2 2996
10ef9ab4
SP
2997 be_msix_enable(adapter);
2998
2999 status = be_evt_queues_create(adapter);
3000 if (status)
a54769f5 3001 goto err;
6b7c5b94 3002
10ef9ab4
SP
3003 status = be_tx_cqs_create(adapter);
3004 if (status)
3005 goto err;
3006
3007 status = be_rx_cqs_create(adapter);
3008 if (status)
a54769f5 3009 goto err;
6b7c5b94 3010
f9449ab7 3011 status = be_mcc_queues_create(adapter);
10ef9ab4 3012 if (status)
a54769f5 3013 goto err;
6b7c5b94 3014
f25b119c
PR
3015 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3016 /* In UMC mode FW does not return the right privileges.
3017 * Override with correct privilege equivalent to PF.
3018 */
3019 if (be_is_mc(adapter))
3020 adapter->cmd_privileges = MAX_PRIVILEGES;
3021
f9449ab7
SP
3022 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3023 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3024
abb93951 3025 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3026 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3027
abb93951 3028 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3029
abb93951 3030 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3031 &adapter->if_handle, 0);
5fb379ee 3032 if (status != 0)
a54769f5 3033 goto err;
6b7c5b94 3034
1578e777
PR
3035 memset(mac, 0, ETH_ALEN);
3036 active_mac = false;
3037 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3038 &active_mac, &adapter->pmac_id[0]);
3039 if (status != 0)
3040 goto err;
3041
3042 if (!active_mac) {
3043 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3044 &adapter->pmac_id[0], 0);
3045 if (status != 0)
3046 goto err;
3047 }
3048
3049 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3050 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3051 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3052 }
0dffc83e 3053
10ef9ab4
SP
3054 status = be_tx_qs_create(adapter);
3055 if (status)
3056 goto err;
3057
04b71175 3058 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3059
1d1e9a46 3060 if (adapter->vlans_added)
10329df8 3061 be_vid_config(adapter);
7ab8b0b4 3062
a54769f5 3063 be_set_rx_mode(adapter->netdev);
5fb379ee 3064
ddc3f5cb 3065 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3066
ddc3f5cb
AK
3067 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3068 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3069 adapter->rx_fc);
2dc1deb6 3070
39f1d94d
SP
3071 if (be_physfn(adapter) && num_vfs) {
3072 if (adapter->dev_num_vfs)
3073 be_vf_setup(adapter);
3074 else
3075 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3076 }
3077
f25b119c
PR
3078 status = be_cmd_get_phy_info(adapter);
3079 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3080 adapter->phy.fc_autoneg = 1;
3081
191eb756
SP
3082 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3083 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3084 return 0;
a54769f5
SP
3085err:
3086 be_clear(adapter);
3087 return status;
3088}
6b7c5b94 3089
66268739
IV
3090#ifdef CONFIG_NET_POLL_CONTROLLER
3091static void be_netpoll(struct net_device *netdev)
3092{
3093 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3094 struct be_eq_obj *eqo;
66268739
IV
3095 int i;
3096
e49cc34f
SP
3097 for_all_evt_queues(adapter, eqo, i) {
3098 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3099 napi_schedule(&eqo->napi);
3100 }
10ef9ab4
SP
3101
3102 return;
66268739
IV
3103}
3104#endif
3105
84517482 3106#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3107char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3108
fa9a6fed 3109static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3110 const u8 *p, u32 img_start, int image_size,
3111 int hdr_size)
fa9a6fed
SB
3112{
3113 u32 crc_offset;
3114 u8 flashed_crc[4];
3115 int status;
3f0d4560
AK
3116
3117 crc_offset = hdr_size + img_start + image_size - 4;
3118
fa9a6fed 3119 p += crc_offset;
3f0d4560
AK
3120
3121 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3122 (image_size - 4));
fa9a6fed
SB
3123 if (status) {
3124 dev_err(&adapter->pdev->dev,
3125 "could not get crc from flash, not flashing redboot\n");
3126 return false;
3127 }
3128
3129 /* update redboot only if CRC does not match */
3130 if (!memcmp(flashed_crc, p, 4))
3131 return false;
3132 else
3133 return true;
fa9a6fed
SB
3134}
3135
306f1348
SP
3136static bool phy_flashing_required(struct be_adapter *adapter)
3137{
42f11cf2
AK
3138 return (adapter->phy.phy_type == TN_8022 &&
3139 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3140}
3141
c165541e
PR
3142static bool is_comp_in_ufi(struct be_adapter *adapter,
3143 struct flash_section_info *fsec, int type)
3144{
3145 int i = 0, img_type = 0;
3146 struct flash_section_info_g2 *fsec_g2 = NULL;
3147
ca34fe38 3148 if (BE2_chip(adapter))
c165541e
PR
3149 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3150
3151 for (i = 0; i < MAX_FLASH_COMP; i++) {
3152 if (fsec_g2)
3153 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3154 else
3155 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3156
3157 if (img_type == type)
3158 return true;
3159 }
3160 return false;
3161
3162}
3163
3164struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3165 int header_size,
3166 const struct firmware *fw)
3167{
3168 struct flash_section_info *fsec = NULL;
3169 const u8 *p = fw->data;
3170
3171 p += header_size;
3172 while (p < (fw->data + fw->size)) {
3173 fsec = (struct flash_section_info *)p;
3174 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3175 return fsec;
3176 p += 32;
3177 }
3178 return NULL;
3179}
3180
773a2d7c
PR
3181static int be_flash(struct be_adapter *adapter, const u8 *img,
3182 struct be_dma_mem *flash_cmd, int optype, int img_size)
3183{
3184 u32 total_bytes = 0, flash_op, num_bytes = 0;
3185 int status = 0;
3186 struct be_cmd_write_flashrom *req = flash_cmd->va;
3187
3188 total_bytes = img_size;
3189 while (total_bytes) {
3190 num_bytes = min_t(u32, 32*1024, total_bytes);
3191
3192 total_bytes -= num_bytes;
3193
3194 if (!total_bytes) {
3195 if (optype == OPTYPE_PHY_FW)
3196 flash_op = FLASHROM_OPER_PHY_FLASH;
3197 else
3198 flash_op = FLASHROM_OPER_FLASH;
3199 } else {
3200 if (optype == OPTYPE_PHY_FW)
3201 flash_op = FLASHROM_OPER_PHY_SAVE;
3202 else
3203 flash_op = FLASHROM_OPER_SAVE;
3204 }
3205
be716446 3206 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3207 img += num_bytes;
3208 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3209 flash_op, num_bytes);
3210 if (status) {
3211 if (status == ILLEGAL_IOCTL_REQ &&
3212 optype == OPTYPE_PHY_FW)
3213 break;
3214 dev_err(&adapter->pdev->dev,
3215 "cmd to write to flash rom failed.\n");
3216 return status;
3217 }
3218 }
3219 return 0;
3220}
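/* Illustrative note: be_flash() pushes an image to the card in 32KB
 * chunks. Every chunk but the last is written with the SAVE opcode
 * (staging data on the card); the final chunk switches to the FLASH
 * opcode, which commits the accumulated image. PHY firmware uses the
 * parallel PHY_SAVE/PHY_FLASH opcodes, and ILLEGAL_IOCTL_REQ there is
 * treated as non-fatal (the PHY simply does not take firmware).
 */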
3221
ca34fe38
SP
3222/* For BE2 and BE3 */
3223static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3224 const struct firmware *fw,
3225 struct be_dma_mem *flash_cmd,
3226 int num_of_images)
3f0d4560 3227
84517482 3228{
3f0d4560 3229 int status = 0, i, filehdr_size = 0;
c165541e 3230 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3231 const u8 *p = fw->data;
215faf9c 3232 const struct flash_comp *pflashcomp;
773a2d7c 3233 int num_comp, redboot;
c165541e
PR
3234 struct flash_section_info *fsec = NULL;
3235
3236 struct flash_comp gen3_flash_types[] = {
3237 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3238 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3239 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3240 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3241 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3242 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3243 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3244 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3245 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3246 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3247 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3248 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3249 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3250 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3251 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3252 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3253 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3254 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3255 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3256 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3257 };
c165541e
PR
3258
3259 struct flash_comp gen2_flash_types[] = {
3260 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3261 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3262 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3263 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3264 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3265 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3266 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3267 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3268 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3269 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3270 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3271 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3272 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3273 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3274 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3275 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3276 };
3277
ca34fe38 3278 if (BE3_chip(adapter)) {
3f0d4560
AK
3279 pflashcomp = gen3_flash_types;
3280 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3281 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3282 } else {
3283 pflashcomp = gen2_flash_types;
3284 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3285 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3286 }
ca34fe38 3287
c165541e
PR
3288 /* Get flash section info */
3289 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3290 if (!fsec) {
3291 dev_err(&adapter->pdev->dev,
3292 "Invalid Cookie. UFI corrupted ?\n");
3293 return -1;
3294 }
9fe96934 3295 for (i = 0; i < num_comp; i++) {
c165541e 3296 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3297 continue;
c165541e
PR
3298
3299 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3300 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3301 continue;
3302
773a2d7c
PR
3303 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3304 !phy_flashing_required(adapter))
306f1348 3305 continue;
c165541e 3306
773a2d7c
PR
3307 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3308 redboot = be_flash_redboot(adapter, fw->data,
3309 pflashcomp[i].offset, pflashcomp[i].size,
3310 filehdr_size + img_hdrs_size);
3311 if (!redboot)
3312 continue;
3313 }
c165541e 3314
3f0d4560 3315 p = fw->data;
c165541e 3316 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3317 if (p + pflashcomp[i].size > fw->data + fw->size)
3318 return -1;
773a2d7c
PR
3319
3320 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3321 pflashcomp[i].size);
3322 if (status) {
3323 dev_err(&adapter->pdev->dev,
3324 "Flashing section type %d failed.\n",
3325 pflashcomp[i].img_type);
3326 return status;
84517482 3327 }
84517482 3328 }
84517482
AK
3329 return 0;
3330}
3331
773a2d7c
PR
3332static int be_flash_skyhawk(struct be_adapter *adapter,
3333 const struct firmware *fw,
3334 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3335{
773a2d7c
PR
3336 int status = 0, i, filehdr_size = 0;
3337 int img_offset, img_size, img_optype, redboot;
3338 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3339 const u8 *p = fw->data;
3340 struct flash_section_info *fsec = NULL;
3341
3342 filehdr_size = sizeof(struct flash_file_hdr_g3);
3343 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3344 if (!fsec) {
3345 dev_err(&adapter->pdev->dev,
3346 "Invalid Cookie. UFI corrupted ?\n");
3347 return -1;
3348 }
3349
3350 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3351 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3352 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3353
3354 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3355 case IMAGE_FIRMWARE_iSCSI:
3356 img_optype = OPTYPE_ISCSI_ACTIVE;
3357 break;
3358 case IMAGE_BOOT_CODE:
3359 img_optype = OPTYPE_REDBOOT;
3360 break;
3361 case IMAGE_OPTION_ROM_ISCSI:
3362 img_optype = OPTYPE_BIOS;
3363 break;
3364 case IMAGE_OPTION_ROM_PXE:
3365 img_optype = OPTYPE_PXE_BIOS;
3366 break;
3367 case IMAGE_OPTION_ROM_FCoE:
3368 img_optype = OPTYPE_FCOE_BIOS;
3369 break;
3370 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3371 img_optype = OPTYPE_ISCSI_BACKUP;
3372 break;
3373 case IMAGE_NCSI:
3374 img_optype = OPTYPE_NCSI_FW;
3375 break;
3376 default:
3377 continue;
3378 }
3379
3380 if (img_optype == OPTYPE_REDBOOT) {
3381 redboot = be_flash_redboot(adapter, fw->data,
3382 img_offset, img_size,
3383 filehdr_size + img_hdrs_size);
3384 if (!redboot)
3385 continue;
3386 }
3387
3388 p = fw->data;
3389 p += filehdr_size + img_offset + img_hdrs_size;
3390 if (p + img_size > fw->data + fw->size)
3391 return -1;
3392
3393 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3394 if (status) {
3395 dev_err(&adapter->pdev->dev,
3396 "Flashing section type %d failed.\n",
3397 fsec->fsec_entry[i].type);
3398 return status;
3399 }
3400 }
3401 return 0;
3f0d4560
AK
3402}
3403
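/*
 * Illustrative sketch (outside the kernel) of what the le32_to_cpu()
 * conversions above accomplish: the flash section header fields are stored
 * little-endian on the device, so each 32-bit value is decoded byte by byte
 * to get a correct result on a host of any endianness. read_le32() is a
 * made-up name for this demonstration.
 */
#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}
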
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

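/*
 * The wait-for-idle routine above is the classic poll-with-timeout pattern:
 * sample a status word, exit early when the busy bit clears, and give up
 * after a bounded number of attempts. A minimal user-space sketch with
 * illustrative names; read_status() is a stub standing in for the device
 * register read, and BUSY_BIT is an assumed bit layout:
 */
#include <stdint.h>
#include <unistd.h>

#define BUSY_BIT  0x1u
#define MAX_POLLS 30

static uint32_t read_status(void) { return 0; }	/* stub for the register */

static int wait_idle(void)
{
	int i;

	for (i = 0; i < MAX_POLLS; i++) {
		if ((read_status() & BUSY_BIT) == 0)
			return 0;	/* device went idle */
		sleep(1);
	}
	return -1;			/* still busy after the timeout */
}
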
static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
			PHYSDEV_CONTROL_OFFSET);

	return status;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

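/*
 * A stand-alone sketch of the chunking loop above (illustrative names, not
 * the driver's API): the image is pushed at most 32 KiB at a time, and the
 * running offset and remaining-byte counters advance by however many bytes
 * the sink reports it actually consumed, which need not be a full chunk.
 */
#include <stddef.h>
#include <stdint.h>

#define CHUNK (32 * 1024)

/* hypothetical sink: here it pretends everything offered is consumed */
static size_t write_chunk(const uint8_t *buf, size_t len, size_t off)
{
	(void)buf; (void)off;
	return len;
}

static int push_image(const uint8_t *img, size_t size)
{
	size_t off = 0;

	while (size) {
		size_t n = size < CHUNK ? size : CHUNK;
		size_t written = write_chunk(img + off, n, off);

		if (!written)
			return -1;	/* sink made no progress; bail out */
		off += written;
		size -= written;
	}
	return 0;
}
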
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return UFI_TYPE3;
	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

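/*
 * The feature registration above (including the renamed
 * NETIF_F_HW_VLAN_CTAG_* flags) is plain bitmask composition. A tiny
 * stand-alone illustration of setting and testing such capability bits;
 * the flag values below are made up for the demo, not the kernel's:
 */
#include <stdbool.h>
#include <stdint.h>

#define F_SG		(1u << 0)
#define F_TSO		(1u << 1)
#define F_VLAN_CTAG_TX	(1u << 2)

static bool has_features(uint32_t features, uint32_t wanted)
{
	/* true only if every requested bit is set */
	return (features & wanted) == wanted;
}
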
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

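/*
 * The SLI_INTF decode above (and the one in be_ctrl_init() below) is the
 * standard mask-then-shift extraction of a bit-field from a config register.
 * A generic sketch; the helper name and the example field layout are
 * illustrative only:
 */
#include <stdint.h>

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;	/* isolate field, align to bit 0 */
}
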
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev,
					   rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

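/*
 * Why the mailbox above is allocated 16 bytes larger than needed: the
 * hardware expects a 16-byte-aligned mailbox, so the driver over-allocates
 * and rounds the returned address up with PTR_ALIGN. A portable sketch of
 * that round-up arithmetic, assuming align is a power of two (the function
 * name here is illustrative):
 */
#include <stdint.h>

static void *ptr_align_up(void *p, uintptr_t align)
{
	/* add align-1, then clear the low bits */
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}
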
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
					sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

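/*
 * The "power of 2" requirement above exists because the driver's MODULO
 * helper reduces with a bitwise mask rather than a divide: for n a power
 * of two, x % n == x & (n - 1). A stand-alone sketch of that identity
 * (the function name here is illustrative):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t fast_mod_pow2(uint32_t x, uint32_t n)
{
	assert(n && (n & (n - 1)) == 0);	/* n must be a power of two */
	return x & (n - 1);			/* equivalent to x % n */
}
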
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

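/*
 * container_of(), used above and in be_worker() below to get from an
 * embedded work item back to its enclosing be_adapter, is plain pointer
 * arithmetic via offsetof. A minimal stand-alone illustration with made-up
 * types (the kernel's real macro adds type checking on top of this):
 */
#include <stddef.h>

struct work { int pending; };
struct adapter {
	int id;
	struct work recovery;	/* embedded member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct adapter *adapter_from_work(struct work *w)
{
	/* subtract the member's offset to recover the outer struct */
	return container_of(w, struct adapter, recovery);
}
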
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* Wait for interrupts to quiesce after an FLR */
	msleep(100);

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

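/*
 * be_probe() above uses the kernel's layered-goto unwind idiom: each
 * acquired resource gets a cleanup label, and a failure at step N jumps to
 * the label that releases steps N-1..1 in reverse order, so there is one
 * exit path per failure point and no duplicated teardown. A skeleton with
 * made-up resources:
 */
#include <stdlib.h>

static char *res_a, *res_b;

static int setup(void)
{
	res_a = malloc(64);
	if (!res_a)
		goto err_none;

	res_b = malloc(64);
	if (!res_b)
		goto err_free_a;

	return 0;		/* both resources held on success */

err_free_a:
	free(res_a);		/* unwind in reverse order of acquisition */
err_none:
	return -1;
}
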
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);