/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

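/* Allocate a DMA-coherent ring of 'len' entries of 'entry_size' bytes each
 * for queue 'q'; dma_zalloc_coherent() zeroes the ring, so the queue starts
 * out clean.
 */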
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

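/* Enable or disable host interrupts, preferring the FW command and falling
 * back to the membar control register when the command fails.
 */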
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

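/* The notify helpers below ring the RQ/TXQ/EQ/CQ doorbells. Each doorbell
 * write encodes the ring id in its low bits and the number of entries
 * posted (or events/completions popped) in the upper bits.
 */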
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

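/* ndo_set_mac_address handler: add the new MAC as a new pmac entry, delete
 * the old entry, and update netdev->dev_addr only after the FW confirms
 * that the new MAC is active.
 */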
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

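/* The GET_STATS response layout differs with the cmd version the chip
 * supports: v0 on BE2, v1 on BE3 and v2 on later chips. The two helpers
 * below return the right layout from the DMA'ed response buffer.
 */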
/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

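/* Fold a 16-bit HW counter that can wrap into a 32-bit accumulator: the low
 * 16 bits track the HW counter, the high 16 bits count wrap-arounds. E.g.
 * with *acc == 0x0001fff0, a new HW reading of 0x0010 implies one more wrap
 * and yields 0x00020010.
 */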
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

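/* ndo_get_stats64 handler: per-ring RX/TX totals are read under u64_stats
 * sync sequence counters, so this can run concurrently with the datapath
 * without locking it out.
 */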
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

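/* Each skb needs one WRB per DMA fragment plus one header WRB; on BE chips
 * the number of WRBs posted per packet must be even, so an odd total is
 * padded with a dummy WRB (e.g. a linear skb uses 1 data + 1 hdr = 2 WRBs
 * and needs no dummy). Lancer has no such restriction.
 */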
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

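/* Fill the per-packet header WRB: LSO/checksum-offload flags, the VLAN tag
 * and the total WRB count for this packet. For skip_hw_vlan the 'event' bit
 * is set but 'complete' is cleared, which tells the HW not to insert a
 * VLAN tag.
 */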
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
			 u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

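/* DMA-map the skb's linear data and page frags and post one WRB for each,
 * preceded by the header WRB and optionally followed by a dummy WRB. On a
 * mapping failure all WRBs posted so far are unmapped, the queue head is
 * rolled back and 0 is returned so the caller drops the skb.
 */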
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

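/* Software VLAN insertion used by the TX workarounds below: write the inner
 * (and, for QnQ, outer) tag directly into the packet and tell the caller to
 * skip HW tagging.
 */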
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

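/* TX workarounds for BEx/Lancer ASIC bugs: trim padded runt packets whose
 * IP tot_len the HW would otherwise mishandle, and fall back to SW VLAN
 * insertion (or drop the packet) where HW tagging would corrupt checksums
 * or stall the ASIC.
 */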
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

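/* ndo_start_xmit handler: apply the workarounds, post the WRBs, stop the
 * subqueue *before* ringing the doorbell if a max-fragment packet would no
 * longer fit, and finally notify the HW.
 */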
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (adapter->vlan_tag[vid])
		return status;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

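/* ndo_set_rx_mode handler: mirror the netdev promisc/allmulti flags and the
 * UC/MC address lists into the interface's RX filter, falling back to
 * (multicast) promiscuous mode when the HW filter tables are exhausted.
 */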
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (!status)
		adapter->vf_cfg[vf].plink_tracking = link_state;

	return status;
}

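/* Adaptive interrupt coalescing: estimate packets-per-second from the
 * per-queue RX/TX counters, map it to an EQ delay clamped to the user's
 * [min_eqd, max_eqd] range, and push only the EQs whose delay actually
 * changed to the FW in one MODIFY_EQ_DELAY command.
 */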
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

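/* Pop the page-info entry at the RQ tail. A page is DMA-mapped once and
 * shared by several RX fragments: only the fragment marked last_frag unmaps
 * the whole page; earlier fragments just sync their piece for CPU access.
 */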
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

5be93b9a 1610/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1611static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1612 struct be_rx_compl_info *rxcp)
6b7c5b94 1613{
10ef9ab4 1614 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1615 struct net_device *netdev = adapter->netdev;
6b7c5b94 1616 struct sk_buff *skb;
89420424 1617
bb349bb4 1618 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1619 if (unlikely(!skb)) {
ac124ff9 1620 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1621 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1622 return;
1623 }
1624
10ef9ab4 1625 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1626
6332c8d3 1627 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1629 else
1630 skb_checksum_none_assert(skb);
6b7c5b94 1631
6332c8d3 1632 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1633 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1634 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1635 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1636
1637 skb->encapsulation = rxcp->tunneled;
6384a4d0 1638 skb_mark_napi_id(skb, napi);
6b7c5b94 1639
343e43c0 1640 if (rxcp->vlanf)
86a9bad3 1641 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1642
1643 netif_receive_skb(skb);
6b7c5b94
SP
1644}
1645
5be93b9a 1646/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1647static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1648 struct napi_struct *napi,
1649 struct be_rx_compl_info *rxcp)
6b7c5b94 1650{
10ef9ab4 1651 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1652 struct be_rx_page_info *page_info;
5be93b9a 1653 struct sk_buff *skb = NULL;
2e588f84
SP
1654 u16 remaining, curr_frag_len;
1655 u16 i, j;
3968fa1e 1656
10ef9ab4 1657 skb = napi_get_frags(napi);
5be93b9a 1658 if (!skb) {
10ef9ab4 1659 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1660 return;
1661 }
1662
2e588f84
SP
1663 remaining = rxcp->pkt_size;
1664 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1665 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1666
1667 curr_frag_len = min(remaining, rx_frag_size);
1668
bd46cb6c
AK
1669 /* Coalesce all frags from the same physical page in one slot */
1670 if (i == 0 || page_info->page_offset == 0) {
1671 /* First frag or Fresh page */
1672 j++;
b061b39e 1673 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1674 skb_shinfo(skb)->frags[j].page_offset =
1675 page_info->page_offset;
9e903e08 1676 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1677 } else {
1678 put_page(page_info->page);
1679 }
9e903e08 1680 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1681 skb->truesize += rx_frag_size;
bd46cb6c 1682 remaining -= curr_frag_len;
6b7c5b94
SP
1683 memset(page_info, 0, sizeof(*page_info));
1684 }
bd46cb6c 1685 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1686
5be93b9a 1687 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1688 skb->len = rxcp->pkt_size;
1689 skb->data_len = rxcp->pkt_size;
5be93b9a 1690 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1691 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1692 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1693 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1694
1695 skb->encapsulation = rxcp->tunneled;
6384a4d0 1696 skb_mark_napi_id(skb, napi);
5be93b9a 1697
343e43c0 1698 if (rxcp->vlanf)
86a9bad3 1699 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1700
10ef9ab4 1701 napi_gro_frags(napi);
2e588f84
SP
1702}
1703
10ef9ab4
SP
1704static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1705 struct be_rx_compl_info *rxcp)
2e588f84
SP
1706{
1707 rxcp->pkt_size =
1708 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1709 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1710 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1711 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1712 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1713 rxcp->ip_csum =
1714 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1715 rxcp->l4_csum =
1716 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1717 rxcp->ipv6 =
1718 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1719 rxcp->num_rcvd =
1720 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1721 rxcp->pkt_type =
1722 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1723 rxcp->rss_hash =
c297977e 1724 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1725 if (rxcp->vlanf) {
f93f160b 1726 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f
DM
1727 compl);
1728 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1729 compl);
15d72184 1730 }
12004ae9 1731 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
c9c47142
SP
1732 rxcp->tunneled =
1733 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
2e588f84
SP
1734}
1735
10ef9ab4
SP
1736static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1737 struct be_rx_compl_info *rxcp)
2e588f84
SP
1738{
1739 rxcp->pkt_size =
1740 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1741 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1742 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1743 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1744 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1745 rxcp->ip_csum =
1746 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1747 rxcp->l4_csum =
1748 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1749 rxcp->ipv6 =
1750 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1751 rxcp->num_rcvd =
1752 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1753 rxcp->pkt_type =
1754 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1755 rxcp->rss_hash =
c297977e 1756 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1757 if (rxcp->vlanf) {
f93f160b 1758 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f
DM
1759 compl);
1760 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1761 compl);
15d72184 1762 }
12004ae9 1763 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1764 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1765 ip_frag, compl);
2e588f84
SP
1766}
1767
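/* Consume one RX completion: the valid bit is checked first, rmb() then
 * orders that check before the payload reads, the entry is parsed per the
 * v0/v1 format, its valid bit is cleared for reuse, and the CQ tail is
 * advanced. Returns NULL when no new completion is present.
 */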
1768static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1769{
1770 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1771 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1772 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1773
2e588f84
SP
1774 /* For checking the valid bit it is OK to use either definition, as the
1775 * valid bit is at the same position in both v0 and v1 Rx compls */
1776 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1777 return NULL;
6b7c5b94 1778
2e588f84
SP
1779 rmb();
1780 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1781
2e588f84 1782 if (adapter->be3_native)
10ef9ab4 1783 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1784 else
10ef9ab4 1785 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1786
e38b1706
SK
1787 if (rxcp->ip_frag)
1788 rxcp->l4_csum = 0;
1789
15d72184 1790 if (rxcp->vlanf) {
f93f160b
VV
1791 /* In QNQ modes, if qnq bit is not set, then the packet was
1792 * tagged only with the transparent outer vlan-tag and must
1793 * not be treated as a vlan packet by host
1794 */
1795 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1796 rxcp->vlanf = 0;
6b7c5b94 1797
15d72184 1798 if (!lancer_chip(adapter))
3c709f8f 1799 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1800
939cf306 1801 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1802 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1803 rxcp->vlanf = 0;
1804 }
2e588f84
SP
1805
1806 /* As the compl has been parsed, reset it; we won't touch it again */
1807 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1808
3abcdeda 1809 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1810 return rxcp;
1811}
1812
1829b086 1813static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1814{
6b7c5b94 1815 u32 order = get_order(size);
1829b086 1816
6b7c5b94 1817 if (order > 0)
1829b086
ED
1818 gfp |= __GFP_COMP;
1819 return alloc_pages(gfp, order);
6b7c5b94
SP
1820}
1821
1822/*
1823 * Allocate a page, split it into fragments of size rx_frag_size and post as
1824 * receive buffers to BE
1825 */
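/* Worked example (assuming 4 KB pages and the default rx_frag_size of 2048):
 * big_page_size = (1 << get_order(2048)) * PAGE_SIZE = 4096, so each
 * allocated page is carved into two 2 KB frags; the second frag of a page
 * is marked last_frag so the page is unmapped only once.
 */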
1829b086 1826static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1827{
3abcdeda 1828 struct be_adapter *adapter = rxo->adapter;
26d92f92 1829 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1830 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1831 struct page *pagep = NULL;
ba42fad0 1832 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1833 struct be_eth_rx_d *rxd;
1834 u64 page_dmaaddr = 0, frag_dmaaddr;
1835 u32 posted, page_offset = 0;
1836
3abcdeda 1837 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1838 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1839 if (!pagep) {
1829b086 1840 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1841 if (unlikely(!pagep)) {
ac124ff9 1842 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1843 break;
1844 }
ba42fad0
IV
1845 page_dmaaddr = dma_map_page(dev, pagep, 0,
1846 adapter->big_page_size,
2b7bcebf 1847 DMA_FROM_DEVICE);
ba42fad0
IV
1848 if (dma_mapping_error(dev, page_dmaaddr)) {
1849 put_page(pagep);
1850 pagep = NULL;
1851 rx_stats(rxo)->rx_post_fail++;
1852 break;
1853 }
e50287be 1854 page_offset = 0;
6b7c5b94
SP
1855 } else {
1856 get_page(pagep);
e50287be 1857 page_offset += rx_frag_size;
6b7c5b94 1858 }
e50287be 1859 page_info->page_offset = page_offset;
6b7c5b94 1860 page_info->page = pagep;
6b7c5b94
SP
1861
1862 rxd = queue_head_node(rxq);
e50287be 1863 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1864 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1865 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1866
1867 /* Any space left in the current big page for another frag? */
1868 if ((page_offset + rx_frag_size + rx_frag_size) >
1869 adapter->big_page_size) {
1870 pagep = NULL;
e50287be
SP
1871 page_info->last_frag = true;
1872 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1873 } else {
1874 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1875 }
26d92f92
SP
1876
1877 prev_page_info = page_info;
1878 queue_head_inc(rxq);
10ef9ab4 1879 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1880 }
e50287be
SP
1881
1882 /* Mark the last frag of a page when we break out of the above loop
1883 * with no more slots available in the RXQ
1884 */
1885 if (pagep) {
1886 prev_page_info->last_frag = true;
1887 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1888 }
6b7c5b94
SP
1889
1890 if (posted) {
6b7c5b94 1891 atomic_add(posted, &rxq->used);
6384a4d0
SP
1892 if (rxo->rx_post_starved)
1893 rxo->rx_post_starved = false;
8788fdc2 1894 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1895 } else if (atomic_read(&rxq->used) == 0) {
1896 /* Let be_worker replenish when memory is available */
3abcdeda 1897 rxo->rx_post_starved = true;
6b7c5b94 1898 }
6b7c5b94
SP
1899}
1900
5fb379ee 1901static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1902{
6b7c5b94
SP
1903 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1904
1905 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1906 return NULL;
1907
f3eb62d2 1908 rmb();
6b7c5b94
SP
1909 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1910
1911 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1912
1913 queue_tail_inc(tx_cq);
1914 return txcp;
1915}
1916
3c8def97
SP
1917static u16 be_tx_compl_process(struct be_adapter *adapter,
1918 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1919{
3c8def97 1920 struct be_queue_info *txq = &txo->q;
a73b796e 1921 struct be_eth_wrb *wrb;
3c8def97 1922 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1923 struct sk_buff *sent_skb;
ec43b1a6
SP
1924 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1925 bool unmap_skb_hdr = true;
6b7c5b94 1926
ec43b1a6 1927 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1928 BUG_ON(!sent_skb);
ec43b1a6
SP
1929 sent_skbs[txq->tail] = NULL;
1930
1931 /* skip header wrb */
a73b796e 1932 queue_tail_inc(txq);
6b7c5b94 1933
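/* Walk the data wrbs from the queue tail up to last_index, unmapping each;
 * num_wrbs starts at 1 to account for the header wrb skipped above.
 */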
ec43b1a6 1934 do {
6b7c5b94 1935 cur_index = txq->tail;
a73b796e 1936 wrb = queue_tail_node(txq);
2b7bcebf
IV
1937 unmap_tx_frag(&adapter->pdev->dev, wrb,
1938 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1939 unmap_skb_hdr = false;
1940
6b7c5b94
SP
1941 num_wrbs++;
1942 queue_tail_inc(txq);
ec43b1a6 1943 } while (cur_index != last_index);
6b7c5b94 1944
d8ec2c02 1945 dev_kfree_skb_any(sent_skb);
4d586b82 1946 return num_wrbs;
6b7c5b94
SP
1947}
1948
10ef9ab4
SP
1949/* Return the number of events in the event queue */
1950static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1951{
10ef9ab4
SP
1952 struct be_eq_entry *eqe;
1953 int num = 0;
859b1e4e 1954
10ef9ab4
SP
1955 do {
1956 eqe = queue_tail_node(&eqo->q);
1957 if (eqe->evt == 0)
1958 break;
859b1e4e 1959
10ef9ab4
SP
1960 rmb();
1961 eqe->evt = 0;
1962 num++;
1963 queue_tail_inc(&eqo->q);
1964 } while (true);
1965
1966 return num;
859b1e4e
SP
1967}
1968
10ef9ab4
SP
1969 /* Leaves the EQ in disarmed state */
1970static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1971{
10ef9ab4 1972 int num = events_get(eqo);
859b1e4e 1973
10ef9ab4 1974 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1975}
1976
10ef9ab4 1977static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1978{
1979 struct be_rx_page_info *page_info;
3abcdeda
SP
1980 struct be_queue_info *rxq = &rxo->q;
1981 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1982 struct be_rx_compl_info *rxcp;
d23e946c
SP
1983 struct be_adapter *adapter = rxo->adapter;
1984 int flush_wait = 0;
6b7c5b94 1985
d23e946c
SP
1986 /* Consume pending rx completions.
1987 * Wait for the flush completion (identified by zero num_rcvd)
1988 * to arrive. Notify CQ even when there are no more CQ entries
1989 * for HW to flush partially coalesced CQ entries.
1990 * In Lancer, there is no need to wait for flush compl.
1991 */
1992 for (;;) {
1993 rxcp = be_rx_compl_get(rxo);
1994 if (rxcp == NULL) {
1995 if (lancer_chip(adapter))
1996 break;
1997
1998 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1999 dev_warn(&adapter->pdev->dev,
2000 "did not receive flush compl\n");
2001 break;
2002 }
2003 be_cq_notify(adapter, rx_cq->id, true, 0);
2004 mdelay(1);
2005 } else {
2006 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2007 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2008 if (rxcp->num_rcvd == 0)
2009 break;
2010 }
6b7c5b94
SP
2011 }
2012
d23e946c
SP
2013 /* After cleanup, leave the CQ in unarmed state */
2014 be_cq_notify(adapter, rx_cq->id, false, 0);
2015
2016 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2017 while (atomic_read(&rxq->used) > 0) {
2018 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2019 put_page(page_info->page);
2020 memset(page_info, 0, sizeof(*page_info));
2021 }
2022 BUG_ON(atomic_read(&rxq->used));
482c9e79 2023 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2024}
2025
0ae57bb3 2026static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2027{
0ae57bb3
SP
2028 struct be_tx_obj *txo;
2029 struct be_queue_info *txq;
a8e9179a 2030 struct be_eth_tx_compl *txcp;
4d586b82 2031 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2032 struct sk_buff *sent_skb;
2033 bool dummy_wrb;
0ae57bb3 2034 int i, pending_txqs;
a8e9179a 2035
1a3d0717 2036 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2037 do {
0ae57bb3
SP
2038 pending_txqs = adapter->num_tx_qs;
2039
2040 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2041 cmpl = 0;
2042 num_wrbs = 0;
0ae57bb3
SP
2043 txq = &txo->q;
2044 while ((txcp = be_tx_compl_get(&txo->cq))) {
2045 end_idx =
2046 AMAP_GET_BITS(struct amap_eth_tx_compl,
2047 wrb_index, txcp);
2048 num_wrbs += be_tx_compl_process(adapter, txo,
2049 end_idx);
2050 cmpl++;
2051 }
2052 if (cmpl) {
2053 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2054 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2055 timeo = 0;
0ae57bb3
SP
2056 }
2057 if (atomic_read(&txq->used) == 0)
2058 pending_txqs--;
a8e9179a
SP
2059 }
2060
1a3d0717 2061 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2062 break;
2063
2064 mdelay(1);
2065 } while (true);
2066
0ae57bb3
SP
2067 for_all_tx_queues(adapter, txo, i) {
2068 txq = &txo->q;
2069 if (atomic_read(&txq->used))
2070 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2071 atomic_read(&txq->used));
2072
2073 /* free posted tx for which compls will never arrive */
2074 while (atomic_read(&txq->used)) {
2075 sent_skb = txo->sent_skb_list[txq->tail];
2076 end_idx = txq->tail;
2077 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2078 &dummy_wrb);
2079 index_adv(&end_idx, num_wrbs - 1, txq->len);
2080 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2081 atomic_sub(num_wrbs, &txq->used);
2082 }
b03388d6 2083 }
6b7c5b94
SP
2084}
2085
10ef9ab4
SP
2086static void be_evt_queues_destroy(struct be_adapter *adapter)
2087{
2088 struct be_eq_obj *eqo;
2089 int i;
2090
2091 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2092 if (eqo->q.created) {
2093 be_eq_clean(eqo);
10ef9ab4 2094 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2095 napi_hash_del(&eqo->napi);
68d7bdcb 2096 netif_napi_del(&eqo->napi);
19d59aa7 2097 }
10ef9ab4
SP
2098 be_queue_free(adapter, &eqo->q);
2099 }
2100}
2101
2102static int be_evt_queues_create(struct be_adapter *adapter)
2103{
2104 struct be_queue_info *eq;
2105 struct be_eq_obj *eqo;
2632bafd 2106 struct be_aic_obj *aic;
10ef9ab4
SP
2107 int i, rc;
2108
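/* One EQ (and hence one NAPI context and one MSI-X vector) is created per
 * interrupt, capped by the configured queue count.
 */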
92bf14ab
SP
2109 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2110 adapter->cfg_num_qs);
10ef9ab4
SP
2111
2112 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2113 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2114 BE_NAPI_WEIGHT);
6384a4d0 2115 napi_hash_add(&eqo->napi);
2632bafd 2116 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2117 eqo->adapter = adapter;
2118 eqo->tx_budget = BE_TX_BUDGET;
2119 eqo->idx = i;
2632bafd
SP
2120 aic->max_eqd = BE_MAX_EQD;
2121 aic->enable = true;
10ef9ab4
SP
2122
2123 eq = &eqo->q;
2124 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2125 sizeof(struct be_eq_entry));
2126 if (rc)
2127 return rc;
2128
f2f781a7 2129 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2130 if (rc)
2131 return rc;
2132 }
1cfafab9 2133 return 0;
10ef9ab4
SP
2134}
2135
5fb379ee
SP
2136static void be_mcc_queues_destroy(struct be_adapter *adapter)
2137{
2138 struct be_queue_info *q;
5fb379ee 2139
8788fdc2 2140 q = &adapter->mcc_obj.q;
5fb379ee 2141 if (q->created)
8788fdc2 2142 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2143 be_queue_free(adapter, q);
2144
8788fdc2 2145 q = &adapter->mcc_obj.cq;
5fb379ee 2146 if (q->created)
8788fdc2 2147 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2148 be_queue_free(adapter, q);
2149}
2150
2151/* Must be called only after TX qs are created as MCC shares TX EQ */
2152static int be_mcc_queues_create(struct be_adapter *adapter)
2153{
2154 struct be_queue_info *q, *cq;
5fb379ee 2155
8788fdc2 2156 cq = &adapter->mcc_obj.cq;
5fb379ee 2157 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 2158 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2159 goto err;
2160
10ef9ab4
SP
2161 /* Use the default EQ for MCC completions */
2162 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2163 goto mcc_cq_free;
2164
8788fdc2 2165 q = &adapter->mcc_obj.q;
5fb379ee
SP
2166 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2167 goto mcc_cq_destroy;
2168
8788fdc2 2169 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2170 goto mcc_q_free;
2171
2172 return 0;
2173
2174mcc_q_free:
2175 be_queue_free(adapter, q);
2176mcc_cq_destroy:
8788fdc2 2177 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2178mcc_cq_free:
2179 be_queue_free(adapter, cq);
2180err:
2181 return -1;
2182}
2183
6b7c5b94
SP
2184static void be_tx_queues_destroy(struct be_adapter *adapter)
2185{
2186 struct be_queue_info *q;
3c8def97
SP
2187 struct be_tx_obj *txo;
2188 u8 i;
6b7c5b94 2189
3c8def97
SP
2190 for_all_tx_queues(adapter, txo, i) {
2191 q = &txo->q;
2192 if (q->created)
2193 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2194 be_queue_free(adapter, q);
6b7c5b94 2195
3c8def97
SP
2196 q = &txo->cq;
2197 if (q->created)
2198 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2199 be_queue_free(adapter, q);
2200 }
6b7c5b94
SP
2201}
2202
7707133c 2203static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2204{
10ef9ab4 2205 struct be_queue_info *cq, *eq;
3c8def97 2206 struct be_tx_obj *txo;
92bf14ab 2207 int status, i;
6b7c5b94 2208
92bf14ab 2209 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2210
10ef9ab4
SP
2211 for_all_tx_queues(adapter, txo, i) {
2212 cq = &txo->cq;
2213 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2214 sizeof(struct be_eth_tx_compl));
2215 if (status)
2216 return status;
3c8def97 2217
827da44c
JS
2218 u64_stats_init(&txo->stats.sync);
2219 u64_stats_init(&txo->stats.sync_compl);
2220
10ef9ab4
SP
2221 /* If num_evt_qs is less than num_tx_qs, then more than
2222 * one txq share an eq
2223 */
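/* e.g. with 8 TX queues and 4 EQs, txq0/txq4 share eq0, txq1/txq5 share
 * eq1, and so on (illustrative numbers).
 */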
2224 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2225 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2226 if (status)
2227 return status;
6b7c5b94 2228
10ef9ab4
SP
2229 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2230 sizeof(struct be_eth_wrb));
2231 if (status)
2232 return status;
6b7c5b94 2233
94d73aaa 2234 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2235 if (status)
2236 return status;
3c8def97 2237 }
6b7c5b94 2238
d379142b
SP
2239 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2240 adapter->num_tx_qs);
10ef9ab4 2241 return 0;
6b7c5b94
SP
2242}
2243
10ef9ab4 2244static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2245{
2246 struct be_queue_info *q;
3abcdeda
SP
2247 struct be_rx_obj *rxo;
2248 int i;
2249
2250 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2251 q = &rxo->cq;
2252 if (q->created)
2253 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2254 be_queue_free(adapter, q);
ac6a0c4a
SP
2255 }
2256}
2257
10ef9ab4 2258static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2259{
10ef9ab4 2260 struct be_queue_info *eq, *cq;
3abcdeda
SP
2261 struct be_rx_obj *rxo;
2262 int rc, i;
6b7c5b94 2263
92bf14ab
SP
2264 /* We can create as many RSS rings as there are EQs. */
2265 adapter->num_rx_qs = adapter->num_evt_qs;
2266
2267 /* We'll use RSS only if at least 2 RSS rings are supported.
2268 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2269 */
92bf14ab
SP
2270 if (adapter->num_rx_qs > 1)
2271 adapter->num_rx_qs++;
2272
6b7c5b94 2273 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2274 for_all_rx_queues(adapter, rxo, i) {
2275 rxo->adapter = adapter;
3abcdeda
SP
2276 cq = &rxo->cq;
2277 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2278 sizeof(struct be_eth_rx_compl));
2279 if (rc)
10ef9ab4 2280 return rc;
3abcdeda 2281
827da44c 2282 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2283 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2284 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2285 if (rc)
10ef9ab4 2286 return rc;
3abcdeda 2287 }
6b7c5b94 2288
d379142b
SP
2289 dev_info(&adapter->pdev->dev,
2290 "created %d RSS queue(s) and 1 default RX queue\n",
2291 adapter->num_rx_qs - 1);
10ef9ab4 2292 return 0;
b628bde2
SP
2293}
2294
6b7c5b94
SP
2295static irqreturn_t be_intx(int irq, void *dev)
2296{
e49cc34f
SP
2297 struct be_eq_obj *eqo = dev;
2298 struct be_adapter *adapter = eqo->adapter;
2299 int num_evts = 0;
6b7c5b94 2300
d0b9cec3
SP
2301 /* IRQ is not expected when NAPI is scheduled as the EQ
2302 * will not be armed.
2303 * But this can happen on Lancer INTx, where it takes
2304 * a while to de-assert INTx, or in BE2, where occasionally
2305 * an interrupt may be raised even when EQ is unarmed.
2306 * If NAPI is already scheduled, then counting & notifying
2307 * events will orphan them.
e49cc34f 2308 */
d0b9cec3 2309 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2310 num_evts = events_get(eqo);
d0b9cec3
SP
2311 __napi_schedule(&eqo->napi);
2312 if (num_evts)
2313 eqo->spurious_intr = 0;
2314 }
2315 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2316
d0b9cec3
SP
2317 /* Return IRQ_HANDLED only for the first spurious intr
2318 * after a valid intr to stop the kernel from branding
2319 * this irq as a bad one!
e49cc34f 2320 */
d0b9cec3
SP
2321 if (num_evts || eqo->spurious_intr++ == 0)
2322 return IRQ_HANDLED;
2323 else
2324 return IRQ_NONE;
6b7c5b94
SP
2325}
2326
10ef9ab4 2327static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2328{
10ef9ab4 2329 struct be_eq_obj *eqo = dev;
6b7c5b94 2330
0b545a62
SP
2331 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2332 napi_schedule(&eqo->napi);
6b7c5b94
SP
2333 return IRQ_HANDLED;
2334}
2335
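/* GRO is attempted only for error-free TCP completions whose L4 checksum
 * was validated by hardware.
 */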
2e588f84 2336static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2337{
e38b1706 2338 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2339}
2340
10ef9ab4 2341static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2342 int budget, int polling)
6b7c5b94 2343{
3abcdeda
SP
2344 struct be_adapter *adapter = rxo->adapter;
2345 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2346 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2347 u32 work_done;
2348
2349 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2350 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2351 if (!rxcp)
2352 break;
2353
12004ae9
SP
2354 /* Is it a flush compl that has no data */
2355 if (unlikely(rxcp->num_rcvd == 0))
2356 goto loop_continue;
2357
2358 /* Discard compl with partial DMA Lancer B0 */
2359 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2360 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2361 goto loop_continue;
2362 }
2363
2364 /* On BE drop pkts that arrive due to imperfect filtering in
2365 * promiscuous mode on some SKUs
2366 */
2367 if (unlikely(rxcp->port != adapter->port_num &&
2368 !lancer_chip(adapter))) {
10ef9ab4 2369 be_rx_compl_discard(rxo, rxcp);
12004ae9 2370 goto loop_continue;
64642811 2371 }
009dd872 2372
6384a4d0
SP
2373 /* Don't do gro when we're busy_polling */
2374 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2375 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2376 else
6384a4d0
SP
2377 be_rx_compl_process(rxo, napi, rxcp);
2378
12004ae9 2379loop_continue:
2e588f84 2380 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2381 }
2382
10ef9ab4
SP
2383 if (work_done) {
2384 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2385
6384a4d0
SP
2386 /* When an rx-obj gets into post_starved state, just
2387 * let be_worker do the posting.
2388 */
2389 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2390 !rxo->rx_post_starved)
10ef9ab4 2391 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2392 }
10ef9ab4 2393
6b7c5b94
SP
2394 return work_done;
2395}
2396
10ef9ab4
SP
2397static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2398 int budget, int idx)
6b7c5b94 2399{
6b7c5b94 2400 struct be_eth_tx_compl *txcp;
10ef9ab4 2401 int num_wrbs = 0, work_done;
3c8def97 2402
10ef9ab4
SP
2403 for (work_done = 0; work_done < budget; work_done++) {
2404 txcp = be_tx_compl_get(&txo->cq);
2405 if (!txcp)
2406 break;
2407 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2408 AMAP_GET_BITS(struct amap_eth_tx_compl,
2409 wrb_index, txcp));
10ef9ab4 2410 }
6b7c5b94 2411
10ef9ab4
SP
2412 if (work_done) {
2413 be_cq_notify(adapter, txo->cq.id, true, work_done);
2414 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2415
10ef9ab4
SP
2416 /* As Tx wrbs have been freed up, wake up netdev queue
2417 * if it was stopped due to lack of tx wrbs. */
2418 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2419 atomic_read(&txo->q.used) < txo->q.len / 2) {
2420 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2421 }
10ef9ab4
SP
2422
2423 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2424 tx_stats(txo)->tx_compl += work_done;
2425 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2426 }
10ef9ab4
SP
2427 return (work_done < budget); /* Done */
2428}
6b7c5b94 2429
68d7bdcb 2430int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2431{
2432 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2433 struct be_adapter *adapter = eqo->adapter;
0b545a62 2434 int max_work = 0, work, i, num_evts;
6384a4d0 2435 struct be_rx_obj *rxo;
10ef9ab4 2436 bool tx_done;
f31e50a8 2437
0b545a62
SP
2438 num_evts = events_get(eqo);
2439
10ef9ab4
SP
2440 /* Process all TXQs serviced by this EQ */
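/* TXQs are striped across EQs: this EQ owns txq eqo->idx,
 * eqo->idx + num_evt_qs, eqo->idx + 2 * num_evt_qs, ...
 */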
2441 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2442 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2443 eqo->tx_budget, i);
2444 if (!tx_done)
2445 max_work = budget;
f31e50a8
SP
2446 }
2447
6384a4d0
SP
2448 if (be_lock_napi(eqo)) {
2449 /* This loop will iterate twice for EQ0 in which
2450 * completions of the last RXQ (default one) are also processed.
2451 * For other EQs the loop iterates only once
2452 */
2453 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2454 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2455 max_work = max(work, max_work);
2456 }
2457 be_unlock_napi(eqo);
2458 } else {
2459 max_work = budget;
10ef9ab4 2460 }
6b7c5b94 2461
10ef9ab4
SP
2462 if (is_mcc_eqo(eqo))
2463 be_process_mcc(adapter);
93c86700 2464
10ef9ab4
SP
2465 if (max_work < budget) {
2466 napi_complete(napi);
0b545a62 2467 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2468 } else {
2469 /* As we'll continue in polling mode, count and clear events */
0b545a62 2470 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2471 }
10ef9ab4 2472 return max_work;
6b7c5b94
SP
2473}
2474
6384a4d0
SP
2475#ifdef CONFIG_NET_RX_BUSY_POLL
2476static int be_busy_poll(struct napi_struct *napi)
2477{
2478 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2479 struct be_adapter *adapter = eqo->adapter;
2480 struct be_rx_obj *rxo;
2481 int i, work = 0;
2482
2483 if (!be_lock_busy_poll(eqo))
2484 return LL_FLUSH_BUSY;
2485
2486 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2487 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2488 if (work)
2489 break;
2490 }
2491
2492 be_unlock_busy_poll(eqo);
2493 return work;
2494}
2495#endif
2496
f67ef7ba 2497void be_detect_error(struct be_adapter *adapter)
7c185276 2498{
e1cfb67a
PR
2499 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2500 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2501 u32 i;
eb0eecc1
SK
2502 bool error_detected = false;
2503 struct device *dev = &adapter->pdev->dev;
2504 struct net_device *netdev = adapter->netdev;
7c185276 2505
d23e946c 2506 if (be_hw_error(adapter))
72f02485
SP
2507 return;
2508
e1cfb67a
PR
2509 if (lancer_chip(adapter)) {
2510 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2511 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2512 sliport_err1 = ioread32(adapter->db +
2513 SLIPORT_ERROR1_OFFSET);
2514 sliport_err2 = ioread32(adapter->db +
2515 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2516 adapter->hw_error = true;
2517 /* Do not log error messages if it's a FW reset */
2518 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2519 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2520 dev_info(dev, "Firmware update in progress\n");
2521 } else {
2522 error_detected = true;
2523 dev_err(dev, "Error detected in the card\n");
2524 dev_err(dev, "ERR: sliport status 0x%x\n",
2525 sliport_status);
2526 dev_err(dev, "ERR: sliport error1 0x%x\n",
2527 sliport_err1);
2528 dev_err(dev, "ERR: sliport error2 0x%x\n",
2529 sliport_err2);
2530 }
e1cfb67a
PR
2531 }
2532 } else {
2533 pci_read_config_dword(adapter->pdev,
2534 PCICFG_UE_STATUS_LOW, &ue_lo);
2535 pci_read_config_dword(adapter->pdev,
2536 PCICFG_UE_STATUS_HIGH, &ue_hi);
2537 pci_read_config_dword(adapter->pdev,
2538 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2539 pci_read_config_dword(adapter->pdev,
2540 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2541
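/* Bits set in the UE mask registers are "don't care"; any bit still set
 * after masking below indicates a real unrecoverable error, decoded via
 * the ue_status_low/hi_desc tables.
 */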
f67ef7ba
PR
2542 ue_lo = (ue_lo & ~ue_lo_mask);
2543 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2544
eb0eecc1
SK
2545 /* On certain platforms BE hardware can indicate spurious UEs.
2546 * A real UE will anyway cause the HW to stop working completely,
2547 * hence hw_error is not set on UE detection.
2548 */
f67ef7ba 2549
eb0eecc1
SK
2550 if (ue_lo || ue_hi) {
2551 error_detected = true;
2552 dev_err(dev,
2553 "Unrecoverable Error detected in the adapter");
2554 dev_err(dev, "Please reboot server to recover");
2555 if (skyhawk_chip(adapter))
2556 adapter->hw_error = true;
2557 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2558 if (ue_lo & 1)
2559 dev_err(dev, "UE: %s bit set\n",
2560 ue_status_low_desc[i]);
2561 }
2562 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2563 if (ue_hi & 1)
2564 dev_err(dev, "UE: %s bit set\n",
2565 ue_status_hi_desc[i]);
2566 }
7c185276
AK
2567 }
2568 }
eb0eecc1
SK
2569 if (error_detected)
2570 netif_carrier_off(netdev);
7c185276
AK
2571}
2572
8d56ff11
SP
2573static void be_msix_disable(struct be_adapter *adapter)
2574{
ac6a0c4a 2575 if (msix_enabled(adapter)) {
8d56ff11 2576 pci_disable_msix(adapter->pdev);
ac6a0c4a 2577 adapter->num_msix_vec = 0;
68d7bdcb 2578 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2579 }
2580}
2581
c2bba3df 2582static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2583{
7dc4c064 2584 int i, num_vec;
d379142b 2585 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2586
92bf14ab
SP
2587 /* If RoCE is supported, program the max number of NIC vectors that
2588 * may be configured via set-channels, along with vectors needed for
2589 * RoCE. Else, just program the number we'll use initially.
2590 */
2591 if (be_roce_supported(adapter))
2592 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2593 2 * num_online_cpus());
2594 else
2595 num_vec = adapter->cfg_num_qs;
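/* Illustration (hypothetical numbers): on an 8-CPU host with
 * be_max_eqs() == 16, a RoCE-capable function requests
 * min(2 * 16, 2 * 8) = 16 vectors; if granted, half are later set aside
 * for RoCE (num_msix_roce_vec = num_vec / 2).
 */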
3abcdeda 2596
ac6a0c4a 2597 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2598 adapter->msix_entries[i].entry = i;
2599
7dc4c064
AG
2600 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2601 MIN_MSIX_VECTORS, num_vec);
2602 if (num_vec < 0)
2603 goto fail;
92bf14ab 2604
92bf14ab
SP
2605 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2606 adapter->num_msix_roce_vec = num_vec / 2;
2607 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2608 adapter->num_msix_roce_vec);
2609 }
2610
2611 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2612
2613 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2614 adapter->num_msix_vec);
c2bba3df 2615 return 0;
7dc4c064
AG
2616
2617fail:
2618 dev_warn(dev, "MSIx enable failed\n");
2619
2620 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2621 if (!be_physfn(adapter))
2622 return num_vec;
2623 return 0;
6b7c5b94
SP
2624}
2625
fe6d2a38 2626static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2627 struct be_eq_obj *eqo)
b628bde2 2628{
f2f781a7 2629 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2630}
6b7c5b94 2631
b628bde2
SP
2632static int be_msix_register(struct be_adapter *adapter)
2633{
10ef9ab4
SP
2634 struct net_device *netdev = adapter->netdev;
2635 struct be_eq_obj *eqo;
2636 int status, i, vec;
6b7c5b94 2637
10ef9ab4
SP
2638 for_all_evt_queues(adapter, eqo, i) {
2639 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2640 vec = be_msix_vec_get(adapter, eqo);
2641 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2642 if (status)
2643 goto err_msix;
2644 }
b628bde2 2645
6b7c5b94 2646 return 0;
3abcdeda 2647err_msix:
10ef9ab4
SP
2648 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2649 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2650 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2651 status);
ac6a0c4a 2652 be_msix_disable(adapter);
6b7c5b94
SP
2653 return status;
2654}
2655
2656static int be_irq_register(struct be_adapter *adapter)
2657{
2658 struct net_device *netdev = adapter->netdev;
2659 int status;
2660
ac6a0c4a 2661 if (msix_enabled(adapter)) {
6b7c5b94
SP
2662 status = be_msix_register(adapter);
2663 if (status == 0)
2664 goto done;
ba343c77
SB
2665 /* INTx is not supported for VF */
2666 if (!be_physfn(adapter))
2667 return status;
6b7c5b94
SP
2668 }
2669
e49cc34f 2670 /* INTx: only the first EQ is used */
6b7c5b94
SP
2671 netdev->irq = adapter->pdev->irq;
2672 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2673 &adapter->eq_obj[0]);
6b7c5b94
SP
2674 if (status) {
2675 dev_err(&adapter->pdev->dev,
2676 "INTx request IRQ failed - err %d\n", status);
2677 return status;
2678 }
2679done:
2680 adapter->isr_registered = true;
2681 return 0;
2682}
2683
2684static void be_irq_unregister(struct be_adapter *adapter)
2685{
2686 struct net_device *netdev = adapter->netdev;
10ef9ab4 2687 struct be_eq_obj *eqo;
3abcdeda 2688 int i;
6b7c5b94
SP
2689
2690 if (!adapter->isr_registered)
2691 return;
2692
2693 /* INTx */
ac6a0c4a 2694 if (!msix_enabled(adapter)) {
e49cc34f 2695 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2696 goto done;
2697 }
2698
2699 /* MSIx */
10ef9ab4
SP
2700 for_all_evt_queues(adapter, eqo, i)
2701 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2702
6b7c5b94
SP
2703done:
2704 adapter->isr_registered = false;
6b7c5b94
SP
2705}
2706
10ef9ab4 2707static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2708{
2709 struct be_queue_info *q;
2710 struct be_rx_obj *rxo;
2711 int i;
2712
2713 for_all_rx_queues(adapter, rxo, i) {
2714 q = &rxo->q;
2715 if (q->created) {
2716 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2717 be_rx_cq_clean(rxo);
482c9e79 2718 }
10ef9ab4 2719 be_queue_free(adapter, q);
482c9e79
SP
2720 }
2721}
2722
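/* Teardown order in be_close() below matters: NAPI/busy-poll are quiesced
 * and MCC events disabled before TX completions are drained, RX queues are
 * destroyed, and only then are EQs cleaned and IRQs released.
 */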
889cd4b2
SP
2723static int be_close(struct net_device *netdev)
2724{
2725 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2726 struct be_eq_obj *eqo;
2727 int i;
889cd4b2 2728
e1ad8e33
KA
2729 /* This protection is needed as be_close() may be called even when the
2730 * adapter is in a cleared state (after an EEH perm failure)
2731 */
2732 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2733 return 0;
2734
045508a8
PP
2735 be_roce_dev_close(adapter);
2736
dff345c5
IV
2737 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2738 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2739 napi_disable(&eqo->napi);
6384a4d0
SP
2740 be_disable_busy_poll(eqo);
2741 }
71237b6f 2742 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2743 }
a323d9bf
SP
2744
2745 be_async_mcc_disable(adapter);
2746
2747 /* Wait for all pending tx completions to arrive so that
2748 * all tx skbs are freed.
2749 */
fba87559 2750 netif_tx_disable(netdev);
6e1f9975 2751 be_tx_compl_clean(adapter);
a323d9bf
SP
2752
2753 be_rx_qs_destroy(adapter);
2754
d11a347d
AK
2755 for (i = 1; i < (adapter->uc_macs + 1); i++)
2756 be_cmd_pmac_del(adapter, adapter->if_handle,
2757 adapter->pmac_id[i], 0);
2758 adapter->uc_macs = 0;
2759
a323d9bf 2760 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2761 if (msix_enabled(adapter))
2762 synchronize_irq(be_msix_vec_get(adapter, eqo));
2763 else
2764 synchronize_irq(netdev->irq);
2765 be_eq_clean(eqo);
63fcb27f
PR
2766 }
2767
889cd4b2
SP
2768 be_irq_unregister(adapter);
2769
482c9e79
SP
2770 return 0;
2771}
2772
10ef9ab4 2773static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2774{
2775 struct be_rx_obj *rxo;
e9008ee9
PR
2776 int rc, i, j;
2777 u8 rsstable[128];
482c9e79
SP
2778
2779 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2780 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2781 sizeof(struct be_eth_rx_d));
2782 if (rc)
2783 return rc;
2784 }
2785
2786 /* The FW would like the default RXQ to be created first */
2787 rxo = default_rxo(adapter);
2788 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2789 adapter->if_handle, false, &rxo->rss_id);
2790 if (rc)
2791 return rc;
2792
2793 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2794 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2795 rx_frag_size, adapter->if_handle,
2796 true, &rxo->rss_id);
482c9e79
SP
2797 if (rc)
2798 return rc;
2799 }
2800
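/* Fill the 128-entry RSS indirection table by striping the ring ids,
 * e.g. with 4 RSS rings: r0 r1 r2 r3 r0 r1 ... (the default RXQ is
 * excluded from the table).
 */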
2801 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2802 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2803 for_all_rss_queues(adapter, rxo, i) {
2804 if ((j + i) >= 128)
2805 break;
2806 rsstable[j + i] = rxo->rss_id;
2807 }
2808 }
594ad54a
SR
2809 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2810 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2811
2812 if (!BEx_chip(adapter))
2813 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2814 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2815 } else {
2816 /* Disable RSS, if only default RX Q is created */
2817 adapter->rss_flags = RSS_ENABLE_NONE;
2818 }
594ad54a 2819
da1388d6
VV
2820 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2821 128);
2822 if (rc) {
2823 adapter->rss_flags = RSS_ENABLE_NONE;
2824 return rc;
482c9e79
SP
2825 }
2826
2827 /* First time posting */
10ef9ab4 2828 for_all_rx_queues(adapter, rxo, i)
482c9e79 2829 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2830 return 0;
2831}
2832
6b7c5b94
SP
2833static int be_open(struct net_device *netdev)
2834{
2835 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2836 struct be_eq_obj *eqo;
3abcdeda 2837 struct be_rx_obj *rxo;
10ef9ab4 2838 struct be_tx_obj *txo;
b236916a 2839 u8 link_status;
3abcdeda 2840 int status, i;
5fb379ee 2841
10ef9ab4 2842 status = be_rx_qs_create(adapter);
482c9e79
SP
2843 if (status)
2844 goto err;
2845
c2bba3df
SK
2846 status = be_irq_register(adapter);
2847 if (status)
2848 goto err;
5fb379ee 2849
10ef9ab4 2850 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2851 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2852
10ef9ab4
SP
2853 for_all_tx_queues(adapter, txo, i)
2854 be_cq_notify(adapter, txo->cq.id, true, 0);
2855
7a1e9b20
SP
2856 be_async_mcc_enable(adapter);
2857
10ef9ab4
SP
2858 for_all_evt_queues(adapter, eqo, i) {
2859 napi_enable(&eqo->napi);
6384a4d0 2860 be_enable_busy_poll(eqo);
10ef9ab4
SP
2861 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2862 }
04d3d624 2863 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2864
323ff71e 2865 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2866 if (!status)
2867 be_link_status_update(adapter, link_status);
2868
fba87559 2869 netif_tx_start_all_queues(netdev);
045508a8 2870 be_roce_dev_open(adapter);
c9c47142 2871
c5abe7c0 2872#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2873 if (skyhawk_chip(adapter))
2874 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2875#endif
2876
889cd4b2
SP
2877 return 0;
2878err:
2879 be_close(adapter->netdev);
2880 return -EIO;
5fb379ee
SP
2881}
2882
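/* To arm Wake-on-LAN, the PM control register is programmed, the
 * magic-packet MAC is handed to FW, and PME is enabled for D3hot/D3cold;
 * the disable path passes a zeroed MAC and turns PME off.
 */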
71d8d1b5
AK
2883static int be_setup_wol(struct be_adapter *adapter, bool enable)
2884{
2885 struct be_dma_mem cmd;
2886 int status = 0;
2887 u8 mac[ETH_ALEN];
2888
2889 memset(mac, 0, ETH_ALEN);
2890
2891 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2892 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2893 GFP_KERNEL);
71d8d1b5
AK
2894 if (cmd.va == NULL)
2895 return -1;
71d8d1b5
AK
2896
2897 if (enable) {
2898 status = pci_write_config_dword(adapter->pdev,
2899 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2900 if (status) {
2901 dev_err(&adapter->pdev->dev,
2381a55c 2902 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2903 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2904 cmd.dma);
71d8d1b5
AK
2905 return status;
2906 }
2907 status = be_cmd_enable_magic_wol(adapter,
2908 adapter->netdev->dev_addr, &cmd);
2909 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2910 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2911 } else {
2912 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2913 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2914 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2915 }
2916
2b7bcebf 2917 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2918 return status;
2919}
2920
6d87f5c3
AK
2921/*
2922 * Generate a seed MAC address from the PF MAC address using jhash.
2923 * MAC addresses for VFs are assigned incrementally starting from the seed.
2924 * These addresses are programmed in the ASIC by the PF and the VF driver
2925 * queries for the MAC address during its probe.
2926 */
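/* For illustration (hypothetical seed): if jhash yields 02:00:11:22:33:40,
 * VF0 is given ...:40, VF1 ...:41, and so on, since mac[5] is incremented
 * per VF (no carry into mac[4] is attempted).
 */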
4c876616 2927static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2928{
f9449ab7 2929 u32 vf;
3abcdeda 2930 int status = 0;
6d87f5c3 2931 u8 mac[ETH_ALEN];
11ac75ed 2932 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2933
2934 be_vf_eth_addr_generate(adapter, mac);
2935
11ac75ed 2936 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2937 if (BEx_chip(adapter))
590c391d 2938 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2939 vf_cfg->if_handle,
2940 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2941 else
2942 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2943 vf + 1);
590c391d 2944
6d87f5c3
AK
2945 if (status)
2946 dev_err(&adapter->pdev->dev,
590c391d 2947 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2948 else
11ac75ed 2949 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2950
2951 mac[5] += 1;
2952 }
2953 return status;
2954}
2955
4c876616
SP
2956static int be_vfs_mac_query(struct be_adapter *adapter)
2957{
2958 int status, vf;
2959 u8 mac[ETH_ALEN];
2960 struct be_vf_cfg *vf_cfg;
4c876616
SP
2961
2962 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
2963 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2964 mac, vf_cfg->if_handle,
2965 false, vf+1);
4c876616
SP
2966 if (status)
2967 return status;
2968 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2969 }
2970 return 0;
2971}
2972
f9449ab7 2973static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2974{
11ac75ed 2975 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2976 u32 vf;
2977
257a3feb 2978 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2979 dev_warn(&adapter->pdev->dev,
2980 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2981 goto done;
2982 }
2983
b4c1df93
SP
2984 pci_disable_sriov(adapter->pdev);
2985
11ac75ed 2986 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2987 if (BEx_chip(adapter))
11ac75ed
SP
2988 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2989 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2990 else
2991 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2992 vf + 1);
f9449ab7 2993
11ac75ed
SP
2994 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2995 }
39f1d94d
SP
2996done:
2997 kfree(adapter->vf_cfg);
2998 adapter->num_vfs = 0;
6d87f5c3
AK
2999}
3000
7707133c
SP
3001static void be_clear_queues(struct be_adapter *adapter)
3002{
3003 be_mcc_queues_destroy(adapter);
3004 be_rx_cqs_destroy(adapter);
3005 be_tx_queues_destroy(adapter);
3006 be_evt_queues_destroy(adapter);
3007}
3008
68d7bdcb 3009static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3010{
191eb756
SP
3011 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3012 cancel_delayed_work_sync(&adapter->work);
3013 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3014 }
68d7bdcb
SP
3015}
3016
b05004ad 3017static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3018{
3019 int i;
3020
b05004ad
SK
3021 if (adapter->pmac_id) {
3022 for (i = 0; i < (adapter->uc_macs + 1); i++)
3023 be_cmd_pmac_del(adapter, adapter->if_handle,
3024 adapter->pmac_id[i], 0);
3025 adapter->uc_macs = 0;
3026
3027 kfree(adapter->pmac_id);
3028 adapter->pmac_id = NULL;
3029 }
3030}
3031
c5abe7c0 3032#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3033static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3034{
3035 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3036 be_cmd_manage_iface(adapter, adapter->if_handle,
3037 OP_CONVERT_TUNNEL_TO_NORMAL);
3038
3039 if (adapter->vxlan_port)
3040 be_cmd_set_vxlan_port(adapter, 0);
3041
3042 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3043 adapter->vxlan_port = 0;
3044}
c5abe7c0 3045#endif
c9c47142 3046
b05004ad
SK
3047static int be_clear(struct be_adapter *adapter)
3048{
68d7bdcb 3049 be_cancel_worker(adapter);
191eb756 3050
11ac75ed 3051 if (sriov_enabled(adapter))
f9449ab7
SP
3052 be_vf_clear(adapter);
3053
c5abe7c0 3054#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3055 be_disable_vxlan_offloads(adapter);
c5abe7c0 3056#endif
2d17f403 3057 /* delete the primary mac along with the uc-mac list */
b05004ad 3058 be_mac_clear(adapter);
fbc13f01 3059
f9449ab7 3060 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3061
7707133c 3062 be_clear_queues(adapter);
a54769f5 3063
10ef9ab4 3064 be_msix_disable(adapter);
e1ad8e33 3065 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3066 return 0;
3067}
3068
4c876616 3069static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3070{
92bf14ab 3071 struct be_resources res = {0};
4c876616
SP
3072 struct be_vf_cfg *vf_cfg;
3073 u32 cap_flags, en_flags, vf;
922bbe88 3074 int status = 0;
abb93951 3075
4c876616
SP
3076 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3077 BE_IF_FLAGS_MULTICAST;
abb93951 3078
4c876616 3079 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3080 if (!BE3_chip(adapter)) {
3081 status = be_cmd_get_profile_config(adapter, &res,
3082 vf + 1);
3083 if (!status)
3084 cap_flags = res.if_cap_flags;
3085 }
4c876616
SP
3086
3087 /* If a FW profile exists, then cap_flags are updated */
3088 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3089 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3090 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3091 &vf_cfg->if_handle, vf + 1);
3092 if (status)
3093 goto err;
3094 }
3095err:
3096 return status;
abb93951
PR
3097}
3098
39f1d94d 3099static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3100{
11ac75ed 3101 struct be_vf_cfg *vf_cfg;
30128031
SP
3102 int vf;
3103
39f1d94d
SP
3104 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3105 GFP_KERNEL);
3106 if (!adapter->vf_cfg)
3107 return -ENOMEM;
3108
11ac75ed
SP
3109 for_all_vfs(adapter, vf_cfg, vf) {
3110 vf_cfg->if_handle = -1;
3111 vf_cfg->pmac_id = -1;
30128031 3112 }
39f1d94d 3113 return 0;
30128031
SP
3114}
3115
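/* If VFs were left enabled by a previous driver load (old_vfs), the
 * existing SR-IOV state is reused: if_handles and MACs are queried from FW
 * instead of being re-created, and pci_enable_sriov() is skipped.
 */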
f9449ab7
SP
3116static int be_vf_setup(struct be_adapter *adapter)
3117{
c502224e 3118 struct device *dev = &adapter->pdev->dev;
11ac75ed 3119 struct be_vf_cfg *vf_cfg;
4c876616 3120 int status, old_vfs, vf;
04a06028 3121 u32 privileges;
c502224e 3122 u16 lnk_speed;
39f1d94d 3123
257a3feb 3124 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3125 if (old_vfs) {
3126 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3127 if (old_vfs != num_vfs)
3128 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3129 adapter->num_vfs = old_vfs;
39f1d94d 3130 } else {
92bf14ab 3131 if (num_vfs > be_max_vfs(adapter))
4c876616 3132 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3133 be_max_vfs(adapter), num_vfs);
3134 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3135 if (!adapter->num_vfs)
4c876616 3136 return 0;
39f1d94d
SP
3137 }
3138
3139 status = be_vf_setup_init(adapter);
3140 if (status)
3141 goto err;
30128031 3142
4c876616
SP
3143 if (old_vfs) {
3144 for_all_vfs(adapter, vf_cfg, vf) {
3145 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3146 if (status)
3147 goto err;
3148 }
3149 } else {
3150 status = be_vfs_if_create(adapter);
f9449ab7
SP
3151 if (status)
3152 goto err;
f9449ab7
SP
3153 }
3154
4c876616
SP
3155 if (old_vfs) {
3156 status = be_vfs_mac_query(adapter);
3157 if (status)
3158 goto err;
3159 } else {
39f1d94d
SP
3160 status = be_vf_eth_addr_config(adapter);
3161 if (status)
3162 goto err;
3163 }
f9449ab7 3164
11ac75ed 3165 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3166 /* Allow VFs to program MAC/VLAN filters */
3167 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3168 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3169 status = be_cmd_set_fn_privileges(adapter,
3170 privileges |
3171 BE_PRIV_FILTMGMT,
3172 vf + 1);
3173 if (!status)
3174 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3175 vf);
3176 }
3177
4c876616
SP
3178 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3179 * Allow full available bandwidth
3180 */
3181 if (BE3_chip(adapter) && !old_vfs)
a401801c 3182 be_cmd_config_qos(adapter, 1000, vf + 1);
4c876616
SP
3183
3184 status = be_cmd_link_status_query(adapter, &lnk_speed,
3185 NULL, vf + 1);
3186 if (!status)
3187 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b 3188
bdce2ad7 3189 if (!old_vfs) {
0599863d 3190 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3191 be_cmd_set_logical_link_config(adapter,
3192 IFLA_VF_LINK_STATE_AUTO,
3193 vf+1);
3194 }
f9449ab7 3195 }
b4c1df93
SP
3196
3197 if (!old_vfs) {
3198 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3199 if (status) {
3200 dev_err(dev, "SRIOV enable failed\n");
3201 adapter->num_vfs = 0;
3202 goto err;
3203 }
3204 }
f9449ab7
SP
3205 return 0;
3206err:
4c876616
SP
3207 dev_err(dev, "VF setup failed\n");
3208 be_vf_clear(adapter);
f9449ab7
SP
3209 return status;
3210}
3211
f93f160b
VV
3212/* Converting function_mode bits on BE3 to SH mc_type enums */
3213
3214static u8 be_convert_mc_type(u32 function_mode)
3215{
3216 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3217 return vNIC1;
3218 else if (function_mode & FLEX10_MODE)
3219 return FLEX10;
3220 else if (function_mode & VNIC_MODE)
3221 return vNIC2;
3222 else if (function_mode & UMC_ENABLED)
3223 return UMC;
3224 else
3225 return MC_NONE;
3226}
3227
92bf14ab
SP
3228 /* On BE2/BE3, FW does not report the supported limits */
3229static void BEx_get_resources(struct be_adapter *adapter,
3230 struct be_resources *res)
3231{
3232 struct pci_dev *pdev = adapter->pdev;
3233 bool use_sriov = false;
ecf1f6e1
SR
3234 int max_vfs = 0;
3235
3236 if (be_physfn(adapter) && BE3_chip(adapter)) {
3237 be_cmd_get_profile_config(adapter, res, 0);
3238 /* Some old versions of BE3 FW don't report max_vfs value */
3239 if (res->max_vfs == 0) {
3240 max_vfs = pci_sriov_get_totalvfs(pdev);
3241 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3242 }
3243 use_sriov = res->max_vfs && sriov_want(adapter);
92bf14ab
SP
3244 }
3245
3246 if (be_physfn(adapter))
3247 res->max_uc_mac = BE_UC_PMAC_COUNT;
3248 else
3249 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3250
f93f160b
VV
3251 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3252
3253 if (be_is_mc(adapter)) {
3254 /* Assuming that there are 4 channels per port
3255 * when multi-channel is enabled
3256 */
3257 if (be_is_qnq_mode(adapter))
3258 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3259 else
3260 /* In a non-qnq multichannel mode, the pvid
3261 * takes up one vlan entry
3262 */
3263 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3264 } else {
92bf14ab 3265 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3266 }
3267
92bf14ab
SP
3268 res->max_mcast_mac = BE_MAX_MC;
3269
a5243dab
VV
3270 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3271 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3272 * *only* if it is RSS-capable.
3273 */
3274 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3275 !be_physfn(adapter) || (be_is_mc(adapter) &&
3276 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
92bf14ab
SP
3277 res->max_tx_qs = 1;
3278 else
3279 res->max_tx_qs = BE3_MAX_TX_QS;
3280
3281 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3282 !use_sriov && be_physfn(adapter))
3283 res->max_rss_qs = (adapter->be3_native) ?
3284 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3285 res->max_rx_qs = res->max_rss_qs + 1;
3286
e3dc867c 3287 if (be_physfn(adapter))
ecf1f6e1 3288 res->max_evt_qs = (res->max_vfs > 0) ?
e3dc867c
SR
3289 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3290 else
3291 res->max_evt_qs = 1;
92bf14ab
SP
3292
3293 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3294 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3295 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3296}
3297
30128031
SP
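/* Reset the adapter's soft state to sane defaults before (re)configuration */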
3298static void be_setup_init(struct be_adapter *adapter)
3299{
3300 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3301 adapter->phy.link_speed = -1;
30128031
SP
3302 adapter->if_handle = -1;
3303 adapter->be3_native = false;
3304 adapter->promiscuous = false;
f25b119c
PR
3305 if (be_physfn(adapter))
3306 adapter->cmd_privileges = MAX_PRIVILEGES;
3307 else
3308 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3309}
3310
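/* Populate adapter->res: limits are computed by the driver on BEx chips,
 * and queried from FW (GET_FUNC_CONFIG/GET_PROFILE_CONFIG) on the others.
 */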
92bf14ab 3311static int be_get_resources(struct be_adapter *adapter)
abb93951 3312{
92bf14ab
SP
3313 struct device *dev = &adapter->pdev->dev;
3314 struct be_resources res = {0};
3315 int status;
abb93951 3316
92bf14ab
SP
3317 if (BEx_chip(adapter)) {
3318 BEx_get_resources(adapter, &res);
3319 adapter->res = res;
abb93951
PR
3320 }
3321
92bf14ab
SP
3322 /* For Lancer, SH, etc. read per-function resource limits from FW.
3323 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3324 * GET_PROFILE_CONFIG returns PCI-E related and PF-pool limits.
3325 */
3326 if (!BEx_chip(adapter)) {
3327 status = be_cmd_get_func_config(adapter, &res);
3328 if (status)
3329 return status;
abb93951 3330
92bf14ab
SP
3331 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3332 if (be_roce_supported(adapter))
3333 res.max_evt_qs /= 2;
3334 adapter->res = res;
abb93951 3335
92bf14ab
SP
3336 if (be_physfn(adapter)) {
3337 status = be_cmd_get_profile_config(adapter, &res, 0);
3338 if (status)
3339 return status;
3340 adapter->res.max_vfs = res.max_vfs;
3341 }
abb93951 3342
92bf14ab
SP
3343 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3344 be_max_txqs(adapter), be_max_rxqs(adapter),
3345 be_max_rss(adapter), be_max_eqs(adapter),
3346 be_max_vfs(adapter));
3347 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3348 be_max_uc(adapter), be_max_mc(adapter),
3349 be_max_vlans(adapter));
abb93951 3350 }
4c876616 3351
92bf14ab 3352 return 0;
abb93951
PR
3353}
3354
39f1d94d
SP
3355/* Routine to query per-function resource limits */
3356static int be_get_config(struct be_adapter *adapter)
3357{
542963b7 3358 u16 profile_id;
4c876616 3359 int status;
39f1d94d 3360
abb93951
PR
3361 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3362 &adapter->function_mode,
0ad3157e
VV
3363 &adapter->function_caps,
3364 &adapter->asic_rev);
abb93951 3365 if (status)
92bf14ab 3366 return status;
abb93951 3367
542963b7
VV
3368 if (be_physfn(adapter)) {
3369 status = be_cmd_get_active_profile(adapter, &profile_id);
3370 if (!status)
3371 dev_info(&adapter->pdev->dev,
3372 "Using profile 0x%x\n", profile_id);
3373 }
3374
92bf14ab
SP
3375 status = be_get_resources(adapter);
3376 if (status)
3377 return status;
abb93951 3378
46ee9c14
RN
3379 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3380 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3381 if (!adapter->pmac_id)
3382 return -ENOMEM;
abb93951 3383
92bf14ab
SP
3384 /* Sanitize cfg_num_qs based on HW and platform limits */
3385 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3386
3387 return 0;
39f1d94d
SP
3388}
3389
95046b92
SP
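/* Read the permanent MAC from FW on first-time setup; otherwise re-use
 * dev_addr, which must be re-programmed in case the HW was reset.
 */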
3390static int be_mac_setup(struct be_adapter *adapter)
3391{
3392 u8 mac[ETH_ALEN];
3393 int status;
3394
3395 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3396 status = be_cmd_get_perm_mac(adapter, mac);
3397 if (status)
3398 return status;
3399
3400 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3401 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3402 } else {
3403 /* Maybe the HW was reset; dev_addr must be re-programmed */
3404 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3405 }
3406
2c7a9dc1
AK
3407 /* For BE3-R VFs, the PF programs the initial MAC address */
3408 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3409 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3410 &adapter->pmac_id[0], 0);
95046b92
SP
3411 return 0;
3412}
3413
68d7bdcb
SP
3414static void be_schedule_worker(struct be_adapter *adapter)
3415{
3416 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3417 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3418}
3419
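/* Create the event queues first, then the TX queues, RX completion queues
 * and MCC queues, and finally publish the real RX/TX queue counts to the
 * stack.
 */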
7707133c 3420static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3421{
68d7bdcb 3422 struct net_device *netdev = adapter->netdev;
10ef9ab4 3423 int status;
ba343c77 3424
7707133c 3425 status = be_evt_queues_create(adapter);
abb93951
PR
3426 if (status)
3427 goto err;
73d540f2 3428
7707133c 3429 status = be_tx_qs_create(adapter);
c2bba3df
SK
3430 if (status)
3431 goto err;
10ef9ab4 3432
7707133c 3433 status = be_rx_cqs_create(adapter);
10ef9ab4 3434 if (status)
a54769f5 3435 goto err;
6b7c5b94 3436
7707133c 3437 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3438 if (status)
3439 goto err;
3440
68d7bdcb
SP
3441 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3442 if (status)
3443 goto err;
3444
3445 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3446 if (status)
3447 goto err;
3448
7707133c
SP
3449 return 0;
3450err:
3451 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3452 return status;
3453}
3454
68d7bdcb
SP
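/* Tear down and re-create all queues to apply a new queue configuration.
 * MSI-x is re-programmed only when no vectors are shared with RoCE.
 */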
3455int be_update_queues(struct be_adapter *adapter)
3456{
3457 struct net_device *netdev = adapter->netdev;
3458 int status;
3459
3460 if (netif_running(netdev))
3461 be_close(netdev);
3462
3463 be_cancel_worker(adapter);
3464
3465 /* If any vectors have been shared with RoCE, we cannot re-program
3466 * the MSI-x table.
3467 */
3468 if (!adapter->num_msix_roce_vec)
3469 be_msix_disable(adapter);
3470
3471 be_clear_queues(adapter);
3472
3473 if (!msix_enabled(adapter)) {
3474 status = be_msix_enable(adapter);
3475 if (status)
3476 return status;
3477 }
3478
3479 status = be_setup_queues(adapter);
3480 if (status)
3481 return status;
3482
3483 be_schedule_worker(adapter);
3484
3485 if (netif_running(netdev))
3486 status = be_open(netdev);
3487
3488 return status;
3489}
3490
7707133c
SP
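/* Main (re)initialization path: query FW config, enable MSI-x, create the
 * interface and its queues, program the MAC/VLANs/rx-mode/flow-control,
 * optionally enable SR-IOV, and start the worker.
 */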
3491static int be_setup(struct be_adapter *adapter)
3492{
3493 struct device *dev = &adapter->pdev->dev;
3494 u32 tx_fc, rx_fc, en_flags;
3495 int status;
3496
3497 be_setup_init(adapter);
3498
3499 if (!lancer_chip(adapter))
3500 be_cmd_req_native_mode(adapter);
3501
3502 status = be_get_config(adapter);
10ef9ab4 3503 if (status)
a54769f5 3504 goto err;
6b7c5b94 3505
7707133c 3506 status = be_msix_enable(adapter);
10ef9ab4 3507 if (status)
a54769f5 3508 goto err;
6b7c5b94 3509
f9449ab7 3510 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3511 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3512 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3513 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3514 en_flags = en_flags & be_if_cap_flags(adapter);
3515 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3516 &adapter->if_handle, 0);
7707133c 3517 if (status)
a54769f5 3518 goto err;
6b7c5b94 3519
68d7bdcb
SP
3520 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3521 rtnl_lock();
7707133c 3522 status = be_setup_queues(adapter);
68d7bdcb 3523 rtnl_unlock();
95046b92 3524 if (status)
1578e777
PR
3525 goto err;
3526
7707133c 3527 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3528
3529 status = be_mac_setup(adapter);
10ef9ab4
SP
3530 if (status)
3531 goto err;
3532
eeb65ced 3533 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3534
e9e2a904
SK
3535 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3536 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3537 adapter->fw_ver);
3538 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3539 }
3540
1d1e9a46 3541 if (adapter->vlans_added)
10329df8 3542 be_vid_config(adapter);
7ab8b0b4 3543
a54769f5 3544 be_set_rx_mode(adapter->netdev);
5fb379ee 3545
76a9e08e
SR
3546 be_cmd_get_acpi_wol_cap(adapter);
3547
ddc3f5cb 3548 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3549
ddc3f5cb
AK
3550 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3551 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3552 adapter->rx_fc);
2dc1deb6 3553
bdce2ad7
SR
3554 if (be_physfn(adapter))
3555 be_cmd_set_logical_link_config(adapter,
3556 IFLA_VF_LINK_STATE_AUTO, 0);
3557
b905b5d4 3558 if (sriov_want(adapter)) {
92bf14ab 3559 if (be_max_vfs(adapter))
39f1d94d
SP
3560 be_vf_setup(adapter);
3561 else
3562 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3563 }
3564
f25b119c
PR
3565 status = be_cmd_get_phy_info(adapter);
3566 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3567 adapter->phy.fc_autoneg = 1;
3568
68d7bdcb 3569 be_schedule_worker(adapter);
e1ad8e33 3570 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3571 return 0;
a54769f5
SP
3572err:
3573 be_clear(adapter);
3574 return status;
3575}
6b7c5b94 3576
66268739
IV
3577#ifdef CONFIG_NET_POLL_CONTROLLER
3578static void be_netpoll(struct net_device *netdev)
3579{
3580 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3581 struct be_eq_obj *eqo;
66268739
IV
3582 int i;
3583
e49cc34f
SP
3584 for_all_evt_queues(adapter, eqo, i) {
3585 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3586 napi_schedule(&eqo->napi);
3587 }
10ef9ab4
SP
66268739
IV
3590}
3591#endif
3592
84517482 3593#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3594static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3595
fa9a6fed 3596static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3597 const u8 *p, u32 img_start, int image_size,
3598 int hdr_size)
fa9a6fed
SB
3599{
3600 u32 crc_offset;
3601 u8 flashed_crc[4];
3602 int status;
3f0d4560
AK
3603
3604 crc_offset = hdr_size + img_start + image_size - 4;
3605
fa9a6fed 3606 p += crc_offset;
3f0d4560
AK
3607
3608 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3609 (image_size - 4));
fa9a6fed
SB
3610 if (status) {
3611 dev_err(&adapter->pdev->dev,
3612 "could not get crc from flash, not flashing redboot\n");
3613 return false;
3614 }
3615
3616 /* Update redboot only if the CRC does not match */
3617 if (!memcmp(flashed_crc, p, 4))
3618 return false;
3619 else
3620 return true;
fa9a6fed
SB
3621}
3622
306f1348
SP
3623static bool phy_flashing_required(struct be_adapter *adapter)
3624{
42f11cf2
AK
3625 return (adapter->phy.phy_type == TN_8022 &&
3626 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3627}
3628
c165541e
PR
3629static bool is_comp_in_ufi(struct be_adapter *adapter,
3630 struct flash_section_info *fsec, int type)
3631{
3632 int i = 0, img_type = 0;
3633 struct flash_section_info_g2 *fsec_g2 = NULL;
3634
ca34fe38 3635 if (BE2_chip(adapter))
c165541e
PR
3636 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3637
3638 for (i = 0; i < MAX_FLASH_COMP; i++) {
3639 if (fsec_g2)
3640 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3641 else
3642 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3643
3644 if (img_type == type)
3645 return true;
3646 }
3647 return false;
3648
3649}
3650
4188e7df 3651static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3652 int header_size,
3653 const struct firmware *fw)
3654{
3655 struct flash_section_info *fsec = NULL;
3656 const u8 *p = fw->data;
3657
3658 p += header_size;
3659 while (p < (fw->data + fw->size)) {
3660 fsec = (struct flash_section_info *)p;
3661 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3662 return fsec;
3663 p += 32;
3664 }
3665 return NULL;
3666}
3667
773a2d7c
PR
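/* Write one image to flash in chunks of up to 32KB. All but the last
 * chunk use a SAVE operation; the final chunk uses a FLASH operation to
 * commit the image.
 */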
3668static int be_flash(struct be_adapter *adapter, const u8 *img,
3669 struct be_dma_mem *flash_cmd, int optype, int img_size)
3670{
3671 u32 total_bytes = 0, flash_op, num_bytes = 0;
3672 int status = 0;
3673 struct be_cmd_write_flashrom *req = flash_cmd->va;
3674
3675 total_bytes = img_size;
3676 while (total_bytes) {
3677 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3678
3679 total_bytes -= num_bytes;
3680
3681 if (!total_bytes) {
3682 if (optype == OPTYPE_PHY_FW)
3683 flash_op = FLASHROM_OPER_PHY_FLASH;
3684 else
3685 flash_op = FLASHROM_OPER_FLASH;
3686 } else {
3687 if (optype == OPTYPE_PHY_FW)
3688 flash_op = FLASHROM_OPER_PHY_SAVE;
3689 else
3690 flash_op = FLASHROM_OPER_SAVE;
3691 }
3692
be716446 3693 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3694 img += num_bytes;
3695 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3696 flash_op, num_bytes);
3697 if (status) {
3698 if (status == ILLEGAL_IOCTL_REQ &&
3699 optype == OPTYPE_PHY_FW)
3700 break;
3701 dev_err(&adapter->pdev->dev,
3702 "cmd to write to flash rom failed.\n");
3703 return status;
3704 }
3705 }
3706 return 0;
3707}
3708
0ad3157e 3709/* For BE2, BE3 and BE3-R */
ca34fe38 3710static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3711 const struct firmware *fw,
3712 struct be_dma_mem *flash_cmd,
3713 int num_of_images)
3f0d4560 3714
84517482 3715{
3f0d4560 3716 int status = 0, i, filehdr_size = 0;
c165541e 3717 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3718 const u8 *p = fw->data;
215faf9c 3719 const struct flash_comp *pflashcomp;
773a2d7c 3720 int num_comp, redboot;
c165541e
PR
3721 struct flash_section_info *fsec = NULL;
3722
3723 struct flash_comp gen3_flash_types[] = {
3724 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3725 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3726 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3727 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3728 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3729 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3730 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3731 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3732 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3733 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3734 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3735 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3736 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3737 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3738 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3739 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3740 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3741 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3742 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3743 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3744 };
c165541e
PR
3745
3746 struct flash_comp gen2_flash_types[] = {
3747 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3748 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3749 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3750 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3751 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3752 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3753 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3754 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3755 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3756 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3757 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3758 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3759 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3760 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3761 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3762 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3763 };
3764
ca34fe38 3765 if (BE3_chip(adapter)) {
3f0d4560
AK
3766 pflashcomp = gen3_flash_types;
3767 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3768 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3769 } else {
3770 pflashcomp = gen2_flash_types;
3771 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3772 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3773 }
ca34fe38 3774
c165541e
PR
3775 /* Get flash section info*/
3776 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3777 if (!fsec) {
3778 dev_err(&adapter->pdev->dev,
3779 "Invalid Cookie. UFI corrupted ?\n");
3780 return -1;
3781 }
9fe96934 3782 for (i = 0; i < num_comp; i++) {
c165541e 3783 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3784 continue;
c165541e
PR
3785
3786 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3787 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3788 continue;
3789
773a2d7c
PR
3790 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3791 !phy_flashing_required(adapter))
306f1348 3792 continue;
c165541e 3793
773a2d7c
PR
3794 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3795 redboot = be_flash_redboot(adapter, fw->data,
3796 pflashcomp[i].offset, pflashcomp[i].size,
3797 filehdr_size + img_hdrs_size);
3798 if (!redboot)
3799 continue;
3800 }
c165541e 3801
3f0d4560 3802 p = fw->data;
c165541e 3803 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3804 if (p + pflashcomp[i].size > fw->data + fw->size)
3805 return -1;
773a2d7c
PR
3806
3807 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3808 pflashcomp[i].size);
3809 if (status) {
3810 dev_err(&adapter->pdev->dev,
3811 "Flashing section type %d failed.\n",
3812 pflashcomp[i].img_type);
3813 return status;
84517482 3814 }
84517482 3815 }
84517482
AK
3816 return 0;
3817}
3818
773a2d7c
PR
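/* Walk the UFI's flash section table, map each known image type to its
 * flash optype and write it; unknown image types are skipped.
 */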
3819static int be_flash_skyhawk(struct be_adapter *adapter,
3820 const struct firmware *fw,
3821 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3822{
773a2d7c
PR
3823 int status = 0, i, filehdr_size = 0;
3824 int img_offset, img_size, img_optype, redboot;
3825 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3826 const u8 *p = fw->data;
3827 struct flash_section_info *fsec = NULL;
3828
3829 filehdr_size = sizeof(struct flash_file_hdr_g3);
3830 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3831 if (!fsec) {
3832 dev_err(&adapter->pdev->dev,
3833 "Invalid Cookie. UFI corrupted ?\n");
3834 return -1;
3835 }
3836
3837 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3838 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3839 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3840
3841 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3842 case IMAGE_FIRMWARE_iSCSI:
3843 img_optype = OPTYPE_ISCSI_ACTIVE;
3844 break;
3845 case IMAGE_BOOT_CODE:
3846 img_optype = OPTYPE_REDBOOT;
3847 break;
3848 case IMAGE_OPTION_ROM_ISCSI:
3849 img_optype = OPTYPE_BIOS;
3850 break;
3851 case IMAGE_OPTION_ROM_PXE:
3852 img_optype = OPTYPE_PXE_BIOS;
3853 break;
3854 case IMAGE_OPTION_ROM_FCoE:
3855 img_optype = OPTYPE_FCOE_BIOS;
3856 break;
3857 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3858 img_optype = OPTYPE_ISCSI_BACKUP;
3859 break;
3860 case IMAGE_NCSI:
3861 img_optype = OPTYPE_NCSI_FW;
3862 break;
3863 default:
3864 continue;
3865 }
3866
3867 if (img_optype == OPTYPE_REDBOOT) {
3868 redboot = be_flash_redboot(adapter, fw->data,
3869 img_offset, img_size,
3870 filehdr_size + img_hdrs_size);
3871 if (!redboot)
3872 continue;
3873 }
3874
3875 p = fw->data;
3876 p += filehdr_size + img_offset + img_hdrs_size;
3877 if (p + img_size > fw->data + fw->size)
3878 return -1;
3879
3880 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3881 if (status) {
3882 dev_err(&adapter->pdev->dev,
3883 "Flashing section type %d failed.\n",
3884 le32_to_cpu(fsec->fsec_entry[i].type));
3885 return status;
3886 }
3887 }
3888 return 0;
3f0d4560
AK
3889}
3890
485bf569
SN
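/* Download FW to a Lancer adapter: stream the image in 32KB chunks via
 * WRITE_OBJECT, commit it with a zero-length write, and reset the adapter
 * if the FW indicates a reset is needed to activate the new image.
 */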
3891static int lancer_fw_download(struct be_adapter *adapter,
3892 const struct firmware *fw)
84517482 3893{
485bf569
SN
3894#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3895#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3896 struct be_dma_mem flash_cmd;
485bf569
SN
3897 const u8 *data_ptr = NULL;
3898 u8 *dest_image_ptr = NULL;
3899 size_t image_size = 0;
3900 u32 chunk_size = 0;
3901 u32 data_written = 0;
3902 u32 offset = 0;
3903 int status = 0;
3904 u8 add_status = 0;
f67ef7ba 3905 u8 change_status;
84517482 3906
485bf569 3907 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3908 dev_err(&adapter->pdev->dev,
485bf569
SN
3909 "FW Image not properly aligned. "
3910 "Length must be 4 byte aligned.\n");
3911 status = -EINVAL;
3912 goto lancer_fw_exit;
d9efd2af
SB
3913 }
3914
485bf569
SN
3915 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3916 + LANCER_FW_DOWNLOAD_CHUNK;
3917 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3918 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3919 if (!flash_cmd.va) {
3920 status = -ENOMEM;
485bf569
SN
3921 goto lancer_fw_exit;
3922 }
84517482 3923
485bf569
SN
3924 dest_image_ptr = flash_cmd.va +
3925 sizeof(struct lancer_cmd_req_write_object);
3926 image_size = fw->size;
3927 data_ptr = fw->data;
3928
3929 while (image_size) {
3930 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3931
3932 /* Copy the image chunk content. */
3933 memcpy(dest_image_ptr, data_ptr, chunk_size);
3934
3935 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3936 chunk_size, offset,
3937 LANCER_FW_DOWNLOAD_LOCATION,
3938 &data_written, &change_status,
3939 &add_status);
485bf569
SN
3940 if (status)
3941 break;
3942
3943 offset += data_written;
3944 data_ptr += data_written;
3945 image_size -= data_written;
3946 }
3947
3948 if (!status) {
3949 /* Commit the FW written */
3950 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3951 0, offset,
3952 LANCER_FW_DOWNLOAD_LOCATION,
3953 &data_written, &change_status,
3954 &add_status);
485bf569
SN
3955 }
3956
3957 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3958 flash_cmd.dma);
3959 if (status) {
3960 dev_err(&adapter->pdev->dev,
3961 "Firmware load error. "
3962 "Status code: 0x%x Additional Status: 0x%x\n",
3963 status, add_status);
3964 goto lancer_fw_exit;
3965 }
3966
f67ef7ba 3967 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
3968 dev_info(&adapter->pdev->dev,
3969 "Resetting adapter to activate new FW\n");
5c510811
SK
3970 status = lancer_physdev_ctrl(adapter,
3971 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3972 if (status) {
3973 dev_err(&adapter->pdev->dev,
3974 "Adapter busy for FW reset.\n"
3975 "New FW will not be active.\n");
3976 goto lancer_fw_exit;
3977 }
3978 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3979 dev_err(&adapter->pdev->dev,
3980 "System reboot required for new FW"
3981 " to be active\n");
3982 }
3983
485bf569
SN
3984 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3985lancer_fw_exit:
3986 return status;
3987}
3988
ca34fe38
SP
3989#define UFI_TYPE2 2
3990#define UFI_TYPE3 3
0ad3157e 3991#define UFI_TYPE3R 10
ca34fe38
SP
3992#define UFI_TYPE4 4
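/* Match the UFI's build generation (and the ASIC rev for BE3-R) against
 * the adapter to decide which flashing routine to use.
 */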
3993static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3994 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3995{
3996 if (fhdr == NULL)
3997 goto be_get_ufi_exit;
3998
ca34fe38
SP
3999 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4000 return UFI_TYPE4;
0ad3157e
VV
4001 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4002 if (fhdr->asic_type_rev == 0x10)
4003 return UFI_TYPE3R;
4004 else
4005 return UFI_TYPE3;
4006 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4007 return UFI_TYPE2;
773a2d7c
PR
4008
4009be_get_ufi_exit:
4010 dev_err(&adapter->pdev->dev,
4011 "UFI and Interface are not compatible for flashing\n");
4012 return -1;
4013}
4014
485bf569
SN
4015static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4016{
485bf569
SN
4017 struct flash_file_hdr_g3 *fhdr3;
4018 struct image_hdr *img_hdr_ptr = NULL;
4019 struct be_dma_mem flash_cmd;
4020 const u8 *p;
773a2d7c 4021 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4022
be716446 4023 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
4024 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4025 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
4026 if (!flash_cmd.va) {
4027 status = -ENOMEM;
485bf569 4028 goto be_fw_exit;
84517482
AK
4029 }
4030
773a2d7c 4031 p = fw->data;
0ad3157e 4032 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4033
0ad3157e 4034 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4035
773a2d7c
PR
4036 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4037 for (i = 0; i < num_imgs; i++) {
4038 img_hdr_ptr = (struct image_hdr *)(fw->data +
4039 (sizeof(struct flash_file_hdr_g3) +
4040 i * sizeof(struct image_hdr)));
4041 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
4042 switch (ufi_type) {
4043 case UFI_TYPE4:
773a2d7c
PR
4044 status = be_flash_skyhawk(adapter, fw,
4045 &flash_cmd, num_imgs);
0ad3157e
VV
4046 break;
4047 case UFI_TYPE3R:
ca34fe38
SP
4048 status = be_flash_BEx(adapter, fw, &flash_cmd,
4049 num_imgs);
0ad3157e
VV
4050 break;
4051 case UFI_TYPE3:
4052 /* Do not flash this ufi on BE3-R cards */
4053 if (adapter->asic_rev < 0x10)
4054 status = be_flash_BEx(adapter, fw,
4055 &flash_cmd,
4056 num_imgs);
4057 else {
4058 status = -1;
4059 dev_err(&adapter->pdev->dev,
4060 "Can't load BE3 UFI on BE3R\n");
4061 }
4062 }
3f0d4560 4063 }
773a2d7c
PR
4064 }
4065
ca34fe38
SP
4066 if (ufi_type == UFI_TYPE2)
4067 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4068 else if (ufi_type == -1)
3f0d4560 4069 status = -1;
84517482 4070
2b7bcebf
IV
4071 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4072 flash_cmd.dma);
84517482
AK
4073 if (status) {
4074 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4075 goto be_fw_exit;
84517482
AK
4076 }
4077
af901ca1 4078 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4079
485bf569
SN
4080be_fw_exit:
4081 return status;
4082}
4083
4084int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4085{
4086 const struct firmware *fw;
4087 int status;
4088
4089 if (!netif_running(adapter->netdev)) {
4090 dev_err(&adapter->pdev->dev,
4091 "Firmware load not allowed (interface is down)\n");
4092 return -1;
4093 }
4094
4095 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4096 if (status)
4097 goto fw_exit;
4098
4099 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4100
4101 if (lancer_chip(adapter))
4102 status = lancer_fw_download(adapter, fw);
4103 else
4104 status = be_fw_download(adapter, fw);
4105
eeb65ced
SK
4106 if (!status)
4107 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4108 adapter->fw_on_flash);
4109
84517482
AK
4110fw_exit:
4111 release_firmware(fw);
4112 return status;
4113}
4114
a77dcb8c
AK
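/* ndo_bridge_setlink handler: accepts an IFLA_BRIDGE_MODE of VEB or VEPA
 * and programs the e-switch accordingly; valid only with SR-IOV enabled.
 */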
4115static int be_ndo_bridge_setlink(struct net_device *dev,
4116 struct nlmsghdr *nlh)
4117{
4118 struct be_adapter *adapter = netdev_priv(dev);
4119 struct nlattr *attr, *br_spec;
4120 int rem;
4121 int status = 0;
4122 u16 mode = 0;
4123
4124 if (!sriov_enabled(adapter))
4125 return -EOPNOTSUPP;
4126
4127 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
 /* The attribute may be absent; bail out rather than dereference NULL below */
 if (!br_spec)
 return -EINVAL;
4128
4129 nla_for_each_nested(attr, br_spec, rem) {
4130 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4131 continue;
4132
4133 mode = nla_get_u16(attr);
4134 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4135 return -EINVAL;
4136
4137 status = be_cmd_set_hsw_config(adapter, 0, 0,
4138 adapter->if_handle,
4139 mode == BRIDGE_MODE_VEPA ?
4140 PORT_FWD_TYPE_VEPA :
4141 PORT_FWD_TYPE_VEB);
4142 if (status)
4143 goto err;
4144
4145 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4146 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4147
4148 return status;
4149 }
4150err:
4151 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4152 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4153
4154 return status;
4155}
4156
4157static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4158 struct net_device *dev,
4159 u32 filter_mask)
4160{
4161 struct be_adapter *adapter = netdev_priv(dev);
4162 int status = 0;
4163 u8 hsw_mode;
4164
4165 if (!sriov_enabled(adapter))
4166 return 0;
4167
4168 /* BE and Lancer chips support VEB mode only */
4169 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4170 hsw_mode = PORT_FWD_TYPE_VEB;
4171 } else {
4172 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4173 adapter->if_handle, &hsw_mode);
4174 if (status)
4175 return 0;
4176 }
4177
4178 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4179 hsw_mode == PORT_FWD_TYPE_VEPA ?
4180 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4181}
4182
c5abe7c0 4183#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4184static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4185 __be16 port)
4186{
4187 struct be_adapter *adapter = netdev_priv(netdev);
4188 struct device *dev = &adapter->pdev->dev;
4189 int status;
4190
4191 if (lancer_chip(adapter) || BEx_chip(adapter))
4192 return;
4193
4194 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4195 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4196 be16_to_cpu(port));
4197 dev_info(dev,
4198 "Only one UDP port supported for VxLAN offloads\n");
4199 return;
4200 }
4201
4202 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4203 OP_CONVERT_NORMAL_TO_TUNNEL);
4204 if (status) {
4205 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4206 goto err;
4207 }
4208
4209 status = be_cmd_set_vxlan_port(adapter, port);
4210 if (status) {
4211 dev_warn(dev, "Failed to add VxLAN port\n");
4212 goto err;
4213 }
4214 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4215 adapter->vxlan_port = port;
4216
4217 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4218 be16_to_cpu(port));
4219 return;
4220err:
4221 be_disable_vxlan_offloads(adapter);
4223}
4224
4225static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4226 __be16 port)
4227{
4228 struct be_adapter *adapter = netdev_priv(netdev);
4229
4230 if (lancer_chip(adapter) || BEx_chip(adapter))
4231 return;
4232
4233 if (adapter->vxlan_port != port)
4234 return;
4235
4236 be_disable_vxlan_offloads(adapter);
4237
4238 dev_info(&adapter->pdev->dev,
4239 "Disabled VxLAN offloads for UDP port %d\n",
4240 be16_to_cpu(port));
4241}
c5abe7c0 4242#endif
c9c47142 4243
e5686ad8 4244static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4245 .ndo_open = be_open,
4246 .ndo_stop = be_close,
4247 .ndo_start_xmit = be_xmit,
a54769f5 4248 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4249 .ndo_set_mac_address = be_mac_addr_set,
4250 .ndo_change_mtu = be_change_mtu,
ab1594e9 4251 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4252 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4253 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4254 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4255 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4256 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 4257 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739 4258 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4259 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4260#ifdef CONFIG_NET_POLL_CONTROLLER
4261 .ndo_poll_controller = be_netpoll,
4262#endif
a77dcb8c
AK
4263 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4264 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4265#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4266 .ndo_busy_poll = be_busy_poll,
6384a4d0 4267#endif
c5abe7c0 4268#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4269 .ndo_add_vxlan_port = be_add_vxlan_port,
4270 .ndo_del_vxlan_port = be_del_vxlan_port,
c5abe7c0 4271#endif
6b7c5b94
SP
4272};
4273
4274static void be_netdev_init(struct net_device *netdev)
4275{
4276 struct be_adapter *adapter = netdev_priv(netdev);
4277
c9c47142
SP
4278 if (skyhawk_chip(adapter)) {
4279 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4280 NETIF_F_TSO | NETIF_F_TSO6 |
4281 NETIF_F_GSO_UDP_TUNNEL;
4282 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4283 }
6332c8d3 4284 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4285 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4286 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4287 if (be_multi_rxq(adapter))
4288 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4289
4290 netdev->features |= netdev->hw_features |
f646968f 4291 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4292
eb8a50d9 4293 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4294 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4295
fbc13f01
AK
4296 netdev->priv_flags |= IFF_UNICAST_FLT;
4297
6b7c5b94
SP
4298 netdev->flags |= IFF_MULTICAST;
4299
b7e5887e 4300 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4301
10ef9ab4 4302 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
4303
4304 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
6b7c5b94
SP
4305}
4306
4307static void be_unmap_pci_bars(struct be_adapter *adapter)
4308{
c5b3ad4c
SP
4309 if (adapter->csr)
4310 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4311 if (adapter->db)
ce66f781 4312 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4313}
4314
ce66f781
SP
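/* The doorbell region lives in BAR 0 on Lancer and on VFs, and in
 * BAR 4 on BEx/Skyhawk PFs.
 */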
4315static int db_bar(struct be_adapter *adapter)
4316{
4317 if (lancer_chip(adapter) || !be_physfn(adapter))
4318 return 0;
4319 else
4320 return 4;
4321}
4322
4323static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4324{
dbf0f2a7 4325 if (skyhawk_chip(adapter)) {
ce66f781
SP
4326 adapter->roce_db.size = 4096;
4327 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4328 db_bar(adapter));
4329 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4330 db_bar(adapter));
4331 }
045508a8 4332 return 0;
6b7c5b94
SP
4333}
4334
4335static int be_map_pci_bars(struct be_adapter *adapter)
4336{
4337 u8 __iomem *addr;
fe6d2a38 4338
c5b3ad4c
SP
4339 if (BEx_chip(adapter) && be_physfn(adapter)) {
4340 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4341 if (adapter->csr == NULL)
4342 return -ENOMEM;
4343 }
4344
ce66f781 4345 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4346 if (addr == NULL)
4347 goto pci_map_err;
ba343c77 4348 adapter->db = addr;
ce66f781
SP
4349
4350 be_roce_map_pci_bars(adapter);
6b7c5b94 4351 return 0;
ce66f781 4352
6b7c5b94
SP
4353pci_map_err:
4354 be_unmap_pci_bars(adapter);
4355 return -ENOMEM;
4356}
4357
6b7c5b94
SP
4358static void be_ctrl_cleanup(struct be_adapter *adapter)
4359{
8788fdc2 4360 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4361
4362 be_unmap_pci_bars(adapter);
4363
4364 if (mem->va)
2b7bcebf
IV
4365 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4366 mem->dma);
e7b909a6 4367
5b8821b7 4368 mem = &adapter->rx_filter;
e7b909a6 4369 if (mem->va)
2b7bcebf
IV
4370 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4371 mem->dma);
6b7c5b94
SP
4372}
4373
6b7c5b94
SP
4374static int be_ctrl_init(struct be_adapter *adapter)
4375{
8788fdc2
SP
4376 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4377 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4378 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4379 u32 sli_intf;
6b7c5b94 4380 int status;
6b7c5b94 4381
ce66f781
SP
4382 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4383 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4384 SLI_INTF_FAMILY_SHIFT;
4385 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4386
6b7c5b94
SP
4387 status = be_map_pci_bars(adapter);
4388 if (status)
e7b909a6 4389 goto done;
6b7c5b94
SP
4390
4391 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4392 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4393 mbox_mem_alloc->size,
4394 &mbox_mem_alloc->dma,
4395 GFP_KERNEL);
6b7c5b94 4396 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4397 status = -ENOMEM;
4398 goto unmap_pci_bars;
6b7c5b94
SP
4399 }
4400 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4401 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4402 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4403 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4404
5b8821b7 4405 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4406 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4407 rx_filter->size, &rx_filter->dma,
4408 GFP_KERNEL);
5b8821b7 4409 if (rx_filter->va == NULL) {
e7b909a6
SP
4410 status = -ENOMEM;
4411 goto free_mbox;
4412 }
1f9061d2 4413
2984961c 4414 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4415 spin_lock_init(&adapter->mcc_lock);
4416 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4417
5eeff635 4418 init_completion(&adapter->et_cmd_compl);
cf588477 4419 pci_save_state(adapter->pdev);
6b7c5b94 4420 return 0;
e7b909a6
SP
4421
4422free_mbox:
2b7bcebf
IV
4423 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4424 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4425
4426unmap_pci_bars:
4427 be_unmap_pci_bars(adapter);
4428
4429done:
4430 return status;
6b7c5b94
SP
4431}
4432
4433static void be_stats_cleanup(struct be_adapter *adapter)
4434{
3abcdeda 4435 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4436
4437 if (cmd->va)
2b7bcebf
IV
4438 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4439 cmd->va, cmd->dma);
6b7c5b94
SP
4440}
4441
4442static int be_stats_init(struct be_adapter *adapter)
4443{
3abcdeda 4444 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4445
ca34fe38
SP
4446 if (lancer_chip(adapter))
4447 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4448 else if (BE2_chip(adapter))
89a88ab8 4449 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4450 else if (BE3_chip(adapter))
ca34fe38 4451 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4452 else
4453 /* ALL non-BE ASICs */
4454 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4455
ede23fa8
JP
4456 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4457 GFP_KERNEL);
6b7c5b94
SP
4458 if (cmd->va == NULL)
4459 return -1;
4460 return 0;
4461}
4462
3bc6b06c 4463static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4464{
4465 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4466
6b7c5b94
SP
4467 if (!adapter)
4468 return;
4469
045508a8 4470 be_roce_dev_remove(adapter);
8cef7a78 4471 be_intr_set(adapter, false);
045508a8 4472
f67ef7ba
PR
4473 cancel_delayed_work_sync(&adapter->func_recovery_work);
4474
6b7c5b94
SP
4475 unregister_netdev(adapter->netdev);
4476
5fb379ee
SP
4477 be_clear(adapter);
4478
bf99e50d
PR
4479 /* tell fw we're done with firing cmds */
4480 be_cmd_fw_clean(adapter);
4481
6b7c5b94
SP
4482 be_stats_cleanup(adapter);
4483
4484 be_ctrl_cleanup(adapter);
4485
d6b6d987
SP
4486 pci_disable_pcie_error_reporting(pdev);
4487
6b7c5b94
SP
4488 pci_release_regions(pdev);
4489 pci_disable_device(pdev);
4490
4491 free_netdev(adapter->netdev);
4492}
4493
39f1d94d 4494static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4495{
baaa08d1 4496 int status, level;
6b7c5b94 4497
9e1453c5
AK
4498 status = be_cmd_get_cntl_attributes(adapter);
4499 if (status)
4500 return status;
4501
7aeb2156
PR
4502 /* Must be a power of 2 or else MODULO will BUG_ON */
4503 adapter->be_get_temp_freq = 64;
4504
baaa08d1
VV
4505 if (BEx_chip(adapter)) {
4506 level = be_cmd_get_fw_log_level(adapter);
4507 adapter->msg_enable =
4508 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4509 }
941a77d5 4510
92bf14ab 4511 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4512 return 0;
6b7c5b94
SP
4513}
4514
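/* Recover a Lancer function after an error: wait for the ready state,
 * tear down, clear error flags, then re-run be_setup() and reopen the
 * interface if it was running.
 */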
f67ef7ba 4515static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4516{
01e5b2c4 4517 struct device *dev = &adapter->pdev->dev;
d8110f62 4518 int status;
d8110f62 4519
f67ef7ba
PR
4520 status = lancer_test_and_set_rdy_state(adapter);
4521 if (status)
4522 goto err;
d8110f62 4523
f67ef7ba
PR
4524 if (netif_running(adapter->netdev))
4525 be_close(adapter->netdev);
d8110f62 4526
f67ef7ba
PR
4527 be_clear(adapter);
4528
01e5b2c4 4529 be_clear_all_error(adapter);
f67ef7ba
PR
4530
4531 status = be_setup(adapter);
4532 if (status)
4533 goto err;
d8110f62 4534
f67ef7ba
PR
4535 if (netif_running(adapter->netdev)) {
4536 status = be_open(adapter->netdev);
d8110f62
PR
4537 if (status)
4538 goto err;
f67ef7ba 4539 }
d8110f62 4540
4bebb56a 4541 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4542 return 0;
4543err:
01e5b2c4
SK
4544 if (status == -EAGAIN)
4545 dev_err(dev, "Waiting for resource provisioning\n");
4546 else
4bebb56a 4547 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4548
f67ef7ba
PR
4549 return status;
4550}
4551
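/* Runs every second to detect adapter errors. On a Lancer HW error the
 * netdev is detached and in-place recovery is attempted; the work is
 * rescheduled unless recovery failed for a reason other than -EAGAIN.
 */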
4552static void be_func_recovery_task(struct work_struct *work)
4553{
4554 struct be_adapter *adapter =
4555 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4556 int status = 0;
d8110f62 4557
f67ef7ba 4558 be_detect_error(adapter);
d8110f62 4559
f67ef7ba 4560 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4561
f67ef7ba
PR
4562 rtnl_lock();
4563 netif_device_detach(adapter->netdev);
4564 rtnl_unlock();
d8110f62 4565
f67ef7ba 4566 status = lancer_recover_func(adapter);
f67ef7ba
PR
4567 if (!status)
4568 netif_device_attach(adapter->netdev);
d8110f62 4569 }
f67ef7ba 4570
01e5b2c4
SK
4571 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4572 * no need to attempt further recovery.
4573 */
4574 if (!status || status == -EAGAIN)
4575 schedule_delayed_work(&adapter->func_recovery_work,
4576 msecs_to_jiffies(1000));
d8110f62
PR
4577}
4578
4579static void be_worker(struct work_struct *work)
4580{
4581 struct be_adapter *adapter =
4582 container_of(work, struct be_adapter, work.work);
4583 struct be_rx_obj *rxo;
4584 int i;
4585
d8110f62
PR
4586 /* When interrupts are not yet enabled, just reap any pending
4587 * MCC completions */
4588 if (!netif_running(adapter->netdev)) {
072a9c48 4589 local_bh_disable();
10ef9ab4 4590 be_process_mcc(adapter);
072a9c48 4591 local_bh_enable();
d8110f62
PR
4592 goto reschedule;
4593 }
4594
4595 if (!adapter->stats_cmd_sent) {
4596 if (lancer_chip(adapter))
4597 lancer_cmd_get_pport_stats(adapter,
4598 &adapter->stats_cmd);
4599 else
4600 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4601 }
4602
d696b5e2
VV
4603 if (be_physfn(adapter) &&
4604 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4605 be_cmd_get_die_temperature(adapter);
4606
d8110f62 4607 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4608 /* Replenish RX-queues starved due to memory
4609 * allocation failures.
4610 */
4611 if (rxo->rx_post_starved)
d8110f62 4612 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4613 }
4614
2632bafd 4615 be_eqd_update(adapter);
10ef9ab4 4616
d8110f62
PR
4617reschedule:
4618 adapter->work_counter++;
4619 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4620}
4621
257a3feb 4622/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4623static bool be_reset_required(struct be_adapter *adapter)
4624{
257a3feb 4625 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4626}
4627
d379142b
SP
4628static char *mc_name(struct be_adapter *adapter)
4629{
f93f160b
VV
4630 char *str = ""; /* default */
4631
4632 switch (adapter->mc_type) {
4633 case UMC:
4634 str = "UMC";
4635 break;
4636 case FLEX10:
4637 str = "FLEX10";
4638 break;
4639 case vNIC1:
4640 str = "vNIC-1";
4641 break;
4642 case nPAR:
4643 str = "nPAR";
4644 break;
4645 case UFP:
4646 str = "UFP";
4647 break;
4648 case vNIC2:
4649 str = "vNIC-2";
4650 break;
4651 default:
4652 str = "";
4653 }
4654
4655 return str;
d379142b
SP
4656}
4657
4658static inline char *func_name(struct be_adapter *adapter)
4659{
4660 return be_physfn(adapter) ? "PF" : "VF";
4661}
4662
1dd06ae8 4663static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4664{
4665 int status = 0;
4666 struct be_adapter *adapter;
4667 struct net_device *netdev;
b4e32a71 4668 char port_name;
6b7c5b94
SP
4669
4670 status = pci_enable_device(pdev);
4671 if (status)
4672 goto do_none;
4673
4674 status = pci_request_regions(pdev, DRV_NAME);
4675 if (status)
4676 goto disable_dev;
4677 pci_set_master(pdev);
4678
7f640062 4679 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4680 if (netdev == NULL) {
4681 status = -ENOMEM;
4682 goto rel_reg;
4683 }
4684 adapter = netdev_priv(netdev);
4685 adapter->pdev = pdev;
4686 pci_set_drvdata(pdev, adapter);
4687 adapter->netdev = netdev;
2243e2e9 4688 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4689
4c15c243 4690 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4691 if (!status) {
4692 netdev->features |= NETIF_F_HIGHDMA;
4693 } else {
4c15c243 4694 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4695 if (status) {
4696 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4697 goto free_netdev;
4698 }
4699 }
4700
ea58c180
AK
4701 if (be_physfn(adapter)) {
4702 status = pci_enable_pcie_error_reporting(pdev);
4703 if (!status)
4704 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4705 }
d6b6d987 4706
6b7c5b94
SP
4707 status = be_ctrl_init(adapter);
4708 if (status)
39f1d94d 4709 goto free_netdev;
6b7c5b94 4710
2243e2e9 4711 /* sync up with fw's ready state */
ba343c77 4712 if (be_physfn(adapter)) {
bf99e50d 4713 status = be_fw_wait_ready(adapter);
ba343c77
SB
4714 if (status)
4715 goto ctrl_clean;
ba343c77 4716 }
6b7c5b94 4717
39f1d94d
SP
4718 if (be_reset_required(adapter)) {
4719 status = be_cmd_reset_function(adapter);
4720 if (status)
4721 goto ctrl_clean;
556ae191 4722
2d177be8
KA
4723 /* Wait for interrupts to quiesce after an FLR */
4724 msleep(100);
4725 }
8cef7a78
SK
4726
4727 /* Allow interrupts for other ULPs running on NIC function */
4728 be_intr_set(adapter, true);
10ef9ab4 4729
2d177be8
KA
4730 /* tell fw we're ready to fire cmds */
4731 status = be_cmd_fw_init(adapter);
4732 if (status)
4733 goto ctrl_clean;
4734
2243e2e9
SP
4735 status = be_stats_init(adapter);
4736 if (status)
4737 goto ctrl_clean;
4738
39f1d94d 4739 status = be_get_initial_config(adapter);
6b7c5b94
SP
4740 if (status)
4741 goto stats_clean;
6b7c5b94
SP
4742
4743 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4744 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4745 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4746
5fb379ee
SP
4747 status = be_setup(adapter);
4748 if (status)
55f5c3c5 4749 goto stats_clean;
2243e2e9 4750
3abcdeda 4751 be_netdev_init(netdev);
6b7c5b94
SP
4752 status = register_netdev(netdev);
4753 if (status != 0)
5fb379ee 4754 goto unsetup;
6b7c5b94 4755
045508a8
PP
4756 be_roce_dev_add(adapter);
4757
f67ef7ba
PR
4758 schedule_delayed_work(&adapter->func_recovery_work,
4759 msecs_to_jiffies(1000));
b4e32a71
PR
4760
4761 be_cmd_query_port_name(adapter, &port_name);
4762
d379142b
SP
4763 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4764 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4765
6b7c5b94
SP
4766 return 0;
4767
5fb379ee
SP
4768unsetup:
4769 be_clear(adapter);
6b7c5b94
SP
4770stats_clean:
4771 be_stats_cleanup(adapter);
4772ctrl_clean:
4773 be_ctrl_cleanup(adapter);
f9449ab7 4774free_netdev:
fe6d2a38 4775 free_netdev(netdev);
6b7c5b94
SP
4776rel_reg:
4777 pci_release_regions(pdev);
4778disable_dev:
4779 pci_disable_device(pdev);
4780do_none:
c4ca2374 4781 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4782 return status;
4783}
4784
4785static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4786{
4787 struct be_adapter *adapter = pci_get_drvdata(pdev);
4788 struct net_device *netdev = adapter->netdev;
4789
76a9e08e 4790 if (adapter->wol_en)
71d8d1b5
AK
4791 be_setup_wol(adapter, true);
4792
d4360d6f 4793 be_intr_set(adapter, false);
f67ef7ba
PR
4794 cancel_delayed_work_sync(&adapter->func_recovery_work);
4795
6b7c5b94
SP
4796 netif_device_detach(netdev);
4797 if (netif_running(netdev)) {
4798 rtnl_lock();
4799 be_close(netdev);
4800 rtnl_unlock();
4801 }
9b0365f1 4802 be_clear(adapter);
6b7c5b94
SP
4803
4804 pci_save_state(pdev);
4805 pci_disable_device(pdev);
4806 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4807 return 0;
4808}
4809
4810static int be_resume(struct pci_dev *pdev)
4811{
4812 int status = 0;
4813 struct be_adapter *adapter = pci_get_drvdata(pdev);
4814 struct net_device *netdev = adapter->netdev;
4815
4816 netif_device_detach(netdev);
4817
4818 status = pci_enable_device(pdev);
4819 if (status)
4820 return status;
4821
1ca01512 4822 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4823 pci_restore_state(pdev);
4824
dd5746bf
SB
4825 status = be_fw_wait_ready(adapter);
4826 if (status)
4827 return status;
4828
d4360d6f 4829 be_intr_set(adapter, true);
2243e2e9
SP
4830 /* tell fw we're ready to fire cmds */
4831 status = be_cmd_fw_init(adapter);
4832 if (status)
4833 return status;
4834
9b0365f1 4835 be_setup(adapter);
6b7c5b94
SP
4836 if (netif_running(netdev)) {
4837 rtnl_lock();
4838 be_open(netdev);
4839 rtnl_unlock();
4840 }
f67ef7ba
PR
4841
4842 schedule_delayed_work(&adapter->func_recovery_work,
4843 msecs_to_jiffies(1000));
6b7c5b94 4844 netif_device_attach(netdev);
71d8d1b5 4845
76a9e08e 4846 if (adapter->wol_en)
71d8d1b5 4847 be_setup_wol(adapter, false);
a4ca055f 4848
6b7c5b94
SP
4849 return 0;
4850}
4851
82456b03
SP
4852/*
4853 * An FLR will stop BE from DMAing any data.
4854 */
4855static void be_shutdown(struct pci_dev *pdev)
4856{
4857 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4858
2d5d4154
AK
4859 if (!adapter)
4860 return;
82456b03 4861
0f4a6828 4862 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4863 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4864
2d5d4154 4865 netif_device_detach(adapter->netdev);
82456b03 4866
57841869
AK
4867 be_cmd_reset_function(adapter);
4868
82456b03 4869 pci_disable_device(pdev);
82456b03
SP
4870}
4871
cf588477
SP
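/* PCI EEH callbacks: detach and tear down on error, wait for FW readiness
 * after the slot reset, and re-setup/re-attach the netdev on resume.
 */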
4872static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4873 pci_channel_state_t state)
4874{
4875 struct be_adapter *adapter = pci_get_drvdata(pdev);
4876 struct net_device *netdev = adapter->netdev;
4877
4878 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4879
01e5b2c4
SK
4880 if (!adapter->eeh_error) {
4881 adapter->eeh_error = true;
cf588477 4882
01e5b2c4 4883 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4884
cf588477 4885 rtnl_lock();
01e5b2c4
SK
4886 netif_device_detach(netdev);
4887 if (netif_running(netdev))
4888 be_close(netdev);
cf588477 4889 rtnl_unlock();
01e5b2c4
SK
4890
4891 be_clear(adapter);
cf588477 4892 }
cf588477
SP
4893
4894 if (state == pci_channel_io_perm_failure)
4895 return PCI_ERS_RESULT_DISCONNECT;
4896
4897 pci_disable_device(pdev);
4898
eeb7fc7b
SK
4899 /* The error could cause the FW to trigger a flash debug dump.
4900 * Resetting the card while flash dump is in progress
c8a54163
PR
4901 * can cause it not to recover; wait for it to finish.
4902 * Wait only for first function as it is needed only once per
4903 * adapter.
eeb7fc7b 4904 */
c8a54163
PR
4905 if (pdev->devfn == 0)
4906 ssleep(30);
4907
cf588477
SP
4908 return PCI_ERS_RESULT_NEED_RESET;
4909}
4910
4911static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4912{
4913 struct be_adapter *adapter = pci_get_drvdata(pdev);
4914 int status;
4915
4916 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4917
4918 status = pci_enable_device(pdev);
4919 if (status)
4920 return PCI_ERS_RESULT_DISCONNECT;
4921
4922 pci_set_master(pdev);
1ca01512 4923 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
4924 pci_restore_state(pdev);
4925
4926 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4927 dev_info(&adapter->pdev->dev,
4928 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4929 status = be_fw_wait_ready(adapter);
cf588477
SP
4930 if (status)
4931 return PCI_ERS_RESULT_DISCONNECT;
4932
d6b6d987 4933 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4934 be_clear_all_error(adapter);
cf588477
SP
4935 return PCI_ERS_RESULT_RECOVERED;
4936}
4937
4938static void be_eeh_resume(struct pci_dev *pdev)
4939{
4940 int status = 0;
4941 struct be_adapter *adapter = pci_get_drvdata(pdev);
4942 struct net_device *netdev = adapter->netdev;
4943
4944 dev_info(&adapter->pdev->dev, "EEH resume\n");
4945
4946 pci_save_state(pdev);
4947
2d177be8 4948 status = be_cmd_reset_function(adapter);
cf588477
SP
4949 if (status)
4950 goto err;
4951
2d177be8
KA
4952 /* tell fw we're ready to fire cmds */
4953 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4954 if (status)
4955 goto err;
4956
cf588477
SP
4957 status = be_setup(adapter);
4958 if (status)
4959 goto err;
4960
4961 if (netif_running(netdev)) {
4962 status = be_open(netdev);
4963 if (status)
4964 goto err;
4965 }
f67ef7ba
PR
4966
4967 schedule_delayed_work(&adapter->func_recovery_work,
4968 msecs_to_jiffies(1000));
cf588477
SP
4969 netif_device_attach(netdev);
4970 return;
4971err:
4972 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4973}
4974
3646f0e5 4975static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4976 .error_detected = be_eeh_err_detected,
4977 .slot_reset = be_eeh_reset,
4978 .resume = be_eeh_resume,
4979};
4980
6b7c5b94
SP
4981static struct pci_driver be_driver = {
4982 .name = DRV_NAME,
4983 .id_table = be_dev_ids,
4984 .probe = be_probe,
4985 .remove = be_remove,
4986 .suspend = be_suspend,
cf588477 4987 .resume = be_resume,
82456b03 4988 .shutdown = be_shutdown,
cf588477 4989 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4990};
4991
4992static int __init be_init_module(void)
4993{
8e95a202
JP
4994 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4995 rx_frag_size != 2048) {
6b7c5b94
SP
4996 printk(KERN_WARNING DRV_NAME
4997 " : Module param rx_frag_size must be 2048/4096/8192."
4998 " Using 2048\n");
4999 rx_frag_size = 2048;
5000 }
6b7c5b94
SP
5001
5002 return pci_register_driver(&be_driver);
5003}
5004module_init(be_init_module);
5005
5006static void __exit be_exit_module(void)
5007{
5008 pci_unregister_driver(&be_driver);
5009}
5010module_exit(be_exit_module);