/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

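/* Ring helpers: a queue's ring is backed by a single coherent-DMA buffer
 * of len * entry_size bytes, zeroed on allocation and released when the
 * queue is destroyed.
 */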
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

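/* Host interrupts can be toggled either via a bit in PCI config space
 * (be_reg_intr_set) or, on newer firmware, via a mailbox command; the
 * be_intr_set() wrapper tries the command first and falls back to the
 * register on failure.
 */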
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

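/* Doorbell helpers: each queue type is kicked through a register at a
 * fixed offset from adapter->db. The wmb() in the RQ/TXQ paths makes the
 * ring entries visible to the device before the doorbell write is posted.
 */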
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

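/* Fold a 16-bit HW counter into a 32-bit accumulator: the low half of
 * *acc mirrors the HW value while the high half counts wrap-arounds, so
 * a new value smaller than the last one means the HW counter wrapped.
 */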
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments[rxo->q.id]);
		}
	}
}

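/* ndo_get_stats64: totals are accumulated across all RX and TX queues.
 * Each queue's 64-bit counters are sampled inside a u64_stats retry loop
 * so reads cannot tear against datapath updates on 32-bit hosts.
 */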
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

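/* If the priority bits of the tag handed down by the stack are not in
 * the adapter's allowed priority bitmap, rewrite them with the
 * recommended priority before the tag is programmed into the hdr WRB.
 */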
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

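/* DMA-map the skb head and frags and fill one WRB per mapping, preceded
 * by the hdr WRB. On a mapping failure, all WRBs filled so far are
 * unwound and 0 is returned so the caller can drop the skb.
 */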
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (skb)
			skb->vlan_tci = 0;
	}

	return skb;
}

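/* ndo_start_xmit: applies the two HW workarounds below (padded runt
 * frames and VLAN checksum), fills the WRBs, stops the subqueue if the
 * ring cannot hold another fully-fragmented skb, then rings the doorbell.
 */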
static netdev_tx_t be_xmit(struct sk_buff *skb,
			   struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
		       (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

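/* SR-IOV ndo callbacks. Each one bails out unless SR-IOV is enabled and
 * the VF index is in range; the actual programming is done in FW via
 * mailbox commands issued on behalf of the VF (domain vf + 1).
 */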
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

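/* Walk the PCI bus and count this adapter's virtual functions; depending
 * on vf_state, report either all VFs or only those assigned to a guest.
 */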
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

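/* Adaptive interrupt coalescing: recompute the EQ delay from the RX
 * packet rate sampled once a second, clamp it between the EQ's min and
 * max, and push it to the device only when the value changes.
 */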
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

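/* Pop the page_info for a completed RX frag; the DMA mapping covers a
 * whole "big page", so it is unmapped only when the last frag sharing
 * that page is consumed.
 */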
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

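/* Fetch the next valid RX completion, decode it into rxo->rxcp using the
 * v0 or v1 layout as appropriate, and zero the valid dword so the entry
 * is not processed twice.
 */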
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

1562/*
1563 * Allocate a page, split it to fragments of size rx_frag_size and post as
1564 * receive buffers to BE
1565 */
1829b086 1566static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1567{
3abcdeda 1568 struct be_adapter *adapter = rxo->adapter;
26d92f92 1569 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1570 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1571 struct page *pagep = NULL;
1572 struct be_eth_rx_d *rxd;
1573 u64 page_dmaaddr = 0, frag_dmaaddr;
1574 u32 posted, page_offset = 0;
1575
3abcdeda 1576 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1577 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1578 if (!pagep) {
1829b086 1579 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1580 if (unlikely(!pagep)) {
ac124ff9 1581 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1582 break;
1583 }
2b7bcebf
IV
1584 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1585 0, adapter->big_page_size,
1586 DMA_FROM_DEVICE);
6b7c5b94
SP
1587 page_info->page_offset = 0;
1588 } else {
1589 get_page(pagep);
1590 page_info->page_offset = page_offset + rx_frag_size;
1591 }
1592 page_offset = page_info->page_offset;
1593 page_info->page = pagep;
fac6da5b 1594 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1595 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1596
1597 rxd = queue_head_node(rxq);
1598 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1599 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1600
1601 /* Any space left in the current big page for another frag? */
1602 if ((page_offset + rx_frag_size + rx_frag_size) >
1603 adapter->big_page_size) {
1604 pagep = NULL;
1605 page_info->last_page_user = true;
1606 }
26d92f92
SP
1607
1608 prev_page_info = page_info;
1609 queue_head_inc(rxq);
10ef9ab4 1610 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1611 }
1612 if (pagep)
26d92f92 1613 prev_page_info->last_page_user = true;
6b7c5b94
SP
1614
1615 if (posted) {
6b7c5b94 1616 atomic_add(posted, &rxq->used);
8788fdc2 1617 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1618 } else if (atomic_read(&rxq->used) == 0) {
1619 /* Let be_worker replenish when memory is available */
3abcdeda 1620 rxo->rx_post_starved = true;
6b7c5b94 1621 }
6b7c5b94
SP
1622}
1623
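/* Illustrative sketch (hypothetical helper): the page-splitting scheme
 * used by be_post_rx_frags() above. One compound page of big_page_size is
 * carved into rx_frag_size pieces; every fragment after the first takes an
 * extra reference with get_page(), so each fragment can later be released
 * independently with put_page().
 */
static int example_refs_for_big_page(u32 big_page_size, u32 frag_size,
				     struct page *pagep)
{
	u32 frags = big_page_size / frag_size;
	u32 i;

	for (i = 1; i < frags; i++)	/* first frag owns the alloc ref */
		get_page(pagep);
	return frags;
}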
5fb379ee 1624static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1625{
6b7c5b94
SP
1626 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1627
1628 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1629 return NULL;
1630
f3eb62d2 1631 rmb();
6b7c5b94
SP
1632 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1633
1634 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1635
1636 queue_tail_inc(tx_cq);
1637 return txcp;
1638}
1639
3c8def97
SP
1640static u16 be_tx_compl_process(struct be_adapter *adapter,
1641 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1642{
3c8def97 1643 struct be_queue_info *txq = &txo->q;
a73b796e 1644 struct be_eth_wrb *wrb;
3c8def97 1645 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1646 struct sk_buff *sent_skb;
ec43b1a6
SP
1647 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1648 bool unmap_skb_hdr = true;
6b7c5b94 1649
ec43b1a6 1650 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1651 BUG_ON(!sent_skb);
ec43b1a6
SP
1652 sent_skbs[txq->tail] = NULL;
1653
1654 /* skip header wrb */
a73b796e 1655 queue_tail_inc(txq);
6b7c5b94 1656
ec43b1a6 1657 do {
6b7c5b94 1658 cur_index = txq->tail;
a73b796e 1659 wrb = queue_tail_node(txq);
2b7bcebf
IV
1660 unmap_tx_frag(&adapter->pdev->dev, wrb,
1661 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1662 unmap_skb_hdr = false;
1663
6b7c5b94
SP
1664 num_wrbs++;
1665 queue_tail_inc(txq);
ec43b1a6 1666 } while (cur_index != last_index);
6b7c5b94 1667
6b7c5b94 1668 kfree_skb(sent_skb);
4d586b82 1669 return num_wrbs;
6b7c5b94
SP
1670}
1671
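/* Illustrative sketch (hypothetical helper): a TX completion reports only
 * the index of a packet's last wrb, so be_tx_compl_process() above walks
 * the ring from the current tail up to that index - one header wrb plus a
 * data wrb per fragment.
 */
static u16 example_wrbs_in_compl(u16 tail, u16 last_index, u16 q_len)
{
	u16 num_wrbs = 1;		/* the header wrb */

	while (tail != last_index) {
		tail = (tail + 1) % q_len;	/* wrap around ring end */
		num_wrbs++;
	}
	return num_wrbs;
}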
10ef9ab4
SP
1672/* Return the number of events in the event queue */
1673static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1674{
10ef9ab4
SP
1675 struct be_eq_entry *eqe;
1676 int num = 0;
859b1e4e 1677
10ef9ab4
SP
1678 do {
1679 eqe = queue_tail_node(&eqo->q);
1680 if (eqe->evt == 0)
1681 break;
859b1e4e 1682
10ef9ab4
SP
1683 rmb();
1684 eqe->evt = 0;
1685 num++;
1686 queue_tail_inc(&eqo->q);
1687 } while (true);
1688
1689 return num;
859b1e4e
SP
1690}
1691
10ef9ab4
SP
1692/* Leaves the EQ in disarmed state */
1693static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1694{
10ef9ab4 1695 int num = events_get(eqo);
859b1e4e 1696
10ef9ab4 1697 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1698}
1699
10ef9ab4 1700static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1701{
1702 struct be_rx_page_info *page_info;
3abcdeda
SP
1703 struct be_queue_info *rxq = &rxo->q;
1704 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1705 struct be_rx_compl_info *rxcp;
d23e946c
SP
1706 struct be_adapter *adapter = rxo->adapter;
1707 int flush_wait = 0;
6b7c5b94
SP
1708 u16 tail;
1709
d23e946c
SP
1710 /* Consume pending rx completions.
1711 * Wait for the flush completion (identified by zero num_rcvd)
1712 * to arrive. Notify CQ even when there are no more CQ entries
1713 * for HW to flush partially coalesced CQ entries.
1714 * In Lancer, there is no need to wait for flush compl.
1715 */
1716 for (;;) {
1717 rxcp = be_rx_compl_get(rxo);
1718 if (rxcp == NULL) {
1719 if (lancer_chip(adapter))
1720 break;
1721
1722 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1723 dev_warn(&adapter->pdev->dev,
1724 "did not receive flush compl\n");
1725 break;
1726 }
1727 be_cq_notify(adapter, rx_cq->id, true, 0);
1728 mdelay(1);
1729 } else {
1730 be_rx_compl_discard(rxo, rxcp);
1731 be_cq_notify(adapter, rx_cq->id, true, 1);
1732 if (rxcp->num_rcvd == 0)
1733 break;
1734 }
6b7c5b94
SP
1735 }
1736
d23e946c
SP
1737 /* After cleanup, leave the CQ in unarmed state */
1738 be_cq_notify(adapter, rx_cq->id, false, 0);
1739
1740 /* Then free posted rx buffers that were not used */
6b7c5b94 1741 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1742 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1743 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1744 put_page(page_info->page);
1745 memset(page_info, 0, sizeof(*page_info));
1746 }
1747 BUG_ON(atomic_read(&rxq->used));
482c9e79 1748 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1749}
1750
0ae57bb3 1751static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1752{
0ae57bb3
SP
1753 struct be_tx_obj *txo;
1754 struct be_queue_info *txq;
a8e9179a 1755 struct be_eth_tx_compl *txcp;
4d586b82 1756 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1757 struct sk_buff *sent_skb;
1758 bool dummy_wrb;
0ae57bb3 1759 int i, pending_txqs;
a8e9179a
SP
1760
1761 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1762 do {
0ae57bb3
SP
1763 pending_txqs = adapter->num_tx_qs;
1764
1765 for_all_tx_queues(adapter, txo, i) {
1766 txq = &txo->q;
1767 while ((txcp = be_tx_compl_get(&txo->cq))) {
1768 end_idx =
1769 AMAP_GET_BITS(struct amap_eth_tx_compl,
1770 wrb_index, txcp);
1771 num_wrbs += be_tx_compl_process(adapter, txo,
1772 end_idx);
1773 cmpl++;
1774 }
1775 if (cmpl) {
1776 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1777 atomic_sub(num_wrbs, &txq->used);
1778 cmpl = 0;
1779 num_wrbs = 0;
1780 }
1781 if (atomic_read(&txq->used) == 0)
1782 pending_txqs--;
a8e9179a
SP
1783 }
1784
0ae57bb3 1785 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1786 break;
1787
1788 mdelay(1);
1789 } while (true);
1790
0ae57bb3
SP
1791 for_all_tx_queues(adapter, txo, i) {
1792 txq = &txo->q;
1793 if (atomic_read(&txq->used))
1794 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1795 atomic_read(&txq->used));
1796
1797 /* free posted tx for which compls will never arrive */
1798 while (atomic_read(&txq->used)) {
1799 sent_skb = txo->sent_skb_list[txq->tail];
1800 end_idx = txq->tail;
1801 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1802 &dummy_wrb);
1803 index_adv(&end_idx, num_wrbs - 1, txq->len);
1804 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1805 atomic_sub(num_wrbs, &txq->used);
1806 }
b03388d6 1807 }
6b7c5b94
SP
1808}
1809
10ef9ab4
SP
1810static void be_evt_queues_destroy(struct be_adapter *adapter)
1811{
1812 struct be_eq_obj *eqo;
1813 int i;
1814
1815 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1816 if (eqo->q.created) {
1817 be_eq_clean(eqo);
10ef9ab4 1818 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1819 }
10ef9ab4
SP
1820 be_queue_free(adapter, &eqo->q);
1821 }
1822}
1823
1824static int be_evt_queues_create(struct be_adapter *adapter)
1825{
1826 struct be_queue_info *eq;
1827 struct be_eq_obj *eqo;
1828 int i, rc;
1829
1830 adapter->num_evt_qs = num_irqs(adapter);
1831
1832 for_all_evt_queues(adapter, eqo, i) {
1833 eqo->adapter = adapter;
1834 eqo->tx_budget = BE_TX_BUDGET;
1835 eqo->idx = i;
1836 eqo->max_eqd = BE_MAX_EQD;
1837 eqo->enable_aic = true;
1838
1839 eq = &eqo->q;
1840 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1841 sizeof(struct be_eq_entry));
1842 if (rc)
1843 return rc;
1844
1845 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1846 if (rc)
1847 return rc;
1848 }
1cfafab9 1849 return 0;
10ef9ab4
SP
1850}
1851
5fb379ee
SP
1852static void be_mcc_queues_destroy(struct be_adapter *adapter)
1853{
1854 struct be_queue_info *q;
5fb379ee 1855
8788fdc2 1856 q = &adapter->mcc_obj.q;
5fb379ee 1857 if (q->created)
8788fdc2 1858 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1859 be_queue_free(adapter, q);
1860
8788fdc2 1861 q = &adapter->mcc_obj.cq;
5fb379ee 1862 if (q->created)
8788fdc2 1863 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1864 be_queue_free(adapter, q);
1865}
1866
1867/* Must be called only after TX qs are created as MCC shares TX EQ */
1868static int be_mcc_queues_create(struct be_adapter *adapter)
1869{
1870 struct be_queue_info *q, *cq;
5fb379ee 1871
8788fdc2 1872 cq = &adapter->mcc_obj.cq;
5fb379ee 1873 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1874 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1875 goto err;
1876
10ef9ab4
SP
1877 /* Use the default EQ for MCC completions */
1878 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1879 goto mcc_cq_free;
1880
8788fdc2 1881 q = &adapter->mcc_obj.q;
5fb379ee
SP
1882 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1883 goto mcc_cq_destroy;
1884
8788fdc2 1885 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1886 goto mcc_q_free;
1887
1888 return 0;
1889
1890mcc_q_free:
1891 be_queue_free(adapter, q);
1892mcc_cq_destroy:
8788fdc2 1893 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1894mcc_cq_free:
1895 be_queue_free(adapter, cq);
1896err:
1897 return -1;
1898}
1899
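/* Illustrative sketch (hypothetical helpers, declared only for the
 * example): the unwind style used in be_mcc_queues_create() above. Each
 * failure label releases exactly what was set up before it, in reverse
 * order, so a single exit path stays correct as setup steps grow.
 */
static int example_setup_a(void);
static int example_setup_b(void);
static void example_teardown_a(void);

static int example_create_both(void)
{
	if (example_setup_a())
		goto err;
	if (example_setup_b())
		goto undo_a;
	return 0;
undo_a:
	example_teardown_a();
err:
	return -1;
}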
6b7c5b94
SP
1900static void be_tx_queues_destroy(struct be_adapter *adapter)
1901{
1902 struct be_queue_info *q;
3c8def97
SP
1903 struct be_tx_obj *txo;
1904 u8 i;
6b7c5b94 1905
3c8def97
SP
1906 for_all_tx_queues(adapter, txo, i) {
1907 q = &txo->q;
1908 if (q->created)
1909 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1910 be_queue_free(adapter, q);
6b7c5b94 1911
3c8def97
SP
1912 q = &txo->cq;
1913 if (q->created)
1914 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1915 be_queue_free(adapter, q);
1916 }
6b7c5b94
SP
1917}
1918
dafc0fe3
SP
1919static int be_num_txqs_want(struct be_adapter *adapter)
1920{
abb93951
PR
1921 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1922 be_is_mc(adapter) ||
1923 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1924 BE2_chip(adapter))
dafc0fe3
SP
1925 return 1;
1926 else
abb93951 1927 return adapter->max_tx_queues;
dafc0fe3
SP
1928}
1929
10ef9ab4 1930static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1931{
10ef9ab4
SP
1932 struct be_queue_info *cq, *eq;
1933 int status;
3c8def97
SP
1934 struct be_tx_obj *txo;
1935 u8 i;
6b7c5b94 1936
dafc0fe3 1937 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1938 if (adapter->num_tx_qs != MAX_TX_QS) {
1939 rtnl_lock();
dafc0fe3
SP
1940 netif_set_real_num_tx_queues(adapter->netdev,
1941 adapter->num_tx_qs);
3bb62f4f
PR
1942 rtnl_unlock();
1943 }
dafc0fe3 1944
10ef9ab4
SP
1945 for_all_tx_queues(adapter, txo, i) {
1946 cq = &txo->cq;
1947 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1948 sizeof(struct be_eth_tx_compl));
1949 if (status)
1950 return status;
3c8def97 1951
10ef9ab4
SP
1952 /* If num_evt_qs is less than num_tx_qs, then more than
1953 * one TXQ shares an EQ
1954 */
1955 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1956 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1957 if (status)
1958 return status;
1959 }
1960 return 0;
1961}
6b7c5b94 1962
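/* Illustrative note (hypothetical helper): the "i % num_evt_qs" mapping
 * above spreads CQs over EQs round-robin. With 8 TXQs and 4 EQs, TXQ0 and
 * TXQ4 land on EQ0, TXQ1 and TXQ5 on EQ1, and so on, so no EQ is
 * oversubscribed by more than one extra queue.
 */
static inline int example_eq_for_queue(int q_idx, int num_eqs)
{
	return q_idx % num_eqs;		/* wrap queues onto available EQs */
}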
10ef9ab4
SP
1963static int be_tx_qs_create(struct be_adapter *adapter)
1964{
1965 struct be_tx_obj *txo;
1966 int i, status;
fe6d2a38 1967
3c8def97 1968 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1969 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1970 sizeof(struct be_eth_wrb));
1971 if (status)
1972 return status;
6b7c5b94 1973
94d73aaa 1974 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
1975 if (status)
1976 return status;
3c8def97 1977 }
6b7c5b94 1978
d379142b
SP
1979 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1980 adapter->num_tx_qs);
10ef9ab4 1981 return 0;
6b7c5b94
SP
1982}
1983
10ef9ab4 1984static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1985{
1986 struct be_queue_info *q;
3abcdeda
SP
1987 struct be_rx_obj *rxo;
1988 int i;
1989
1990 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1991 q = &rxo->cq;
1992 if (q->created)
1993 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1994 be_queue_free(adapter, q);
ac6a0c4a
SP
1995 }
1996}
1997
10ef9ab4 1998static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1999{
10ef9ab4 2000 struct be_queue_info *eq, *cq;
3abcdeda
SP
2001 struct be_rx_obj *rxo;
2002 int rc, i;
6b7c5b94 2003
10ef9ab4
SP
2004 /* We'll create as many RSS rings as there are irqs.
2005 * But when there's only one irq there's no use creating RSS rings.
2006 */
2007 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2008 num_irqs(adapter) + 1 : 1;
7f640062
SP
2009 if (adapter->num_rx_qs != MAX_RX_QS) {
2010 rtnl_lock();
2011 netif_set_real_num_rx_queues(adapter->netdev,
2012 adapter->num_rx_qs);
2013 rtnl_unlock();
2014 }
ac6a0c4a 2015
6b7c5b94 2016 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2017 for_all_rx_queues(adapter, rxo, i) {
2018 rxo->adapter = adapter;
3abcdeda
SP
2019 cq = &rxo->cq;
2020 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2021 sizeof(struct be_eth_rx_compl));
2022 if (rc)
10ef9ab4 2023 return rc;
3abcdeda 2024
10ef9ab4
SP
2025 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2026 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2027 if (rc)
10ef9ab4 2028 return rc;
3abcdeda 2029 }
6b7c5b94 2030
d379142b
SP
2031 dev_info(&adapter->pdev->dev,
2032 "created %d RSS queue(s) and 1 default RX queue\n",
2033 adapter->num_rx_qs - 1);
10ef9ab4 2034 return 0;
b628bde2
SP
2035}
2036
6b7c5b94
SP
2037static irqreturn_t be_intx(int irq, void *dev)
2038{
e49cc34f
SP
2039 struct be_eq_obj *eqo = dev;
2040 struct be_adapter *adapter = eqo->adapter;
2041 int num_evts = 0;
6b7c5b94 2042
d0b9cec3
SP
2043 /* IRQ is not expected when NAPI is scheduled as the EQ
2044 * will not be armed.
2045 * But, this can happen on Lancer INTx where it takes
2046 * a while to de-assert INTx or in BE2 where occasionally
2047 * an interrupt may be raised even when EQ is unarmed.
2048 * If NAPI is already scheduled, then counting & notifying
2049 * events will orphan them.
e49cc34f 2050 */
d0b9cec3 2051 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2052 num_evts = events_get(eqo);
d0b9cec3
SP
2053 __napi_schedule(&eqo->napi);
2054 if (num_evts)
2055 eqo->spurious_intr = 0;
2056 }
2057 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2058
d0b9cec3
SP
2059 /* Return IRQ_HANDLED only for the first spurious intr
2060 * after a valid intr to stop the kernel from branding
2061 * this irq as a bad one!
e49cc34f 2062 */
d0b9cec3
SP
2063 if (num_evts || eqo->spurious_intr++ == 0)
2064 return IRQ_HANDLED;
2065 else
2066 return IRQ_NONE;
6b7c5b94
SP
2067}
2068
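/* Illustrative sketch (hypothetical stand-alone form) of the
 * spurious-interrupt accounting in be_intx() above: the first event-less
 * interrupt after a valid one is still acknowledged; repeated ones return
 * IRQ_NONE so the kernel's bad-IRQ detection can engage if the line is
 * genuinely stuck.
 */
static inline irqreturn_t example_intx_verdict(int num_evts, u32 *spurious)
{
	if (num_evts) {
		*spurious = 0;		/* valid intr resets the counter */
		return IRQ_HANDLED;
	}
	return (*spurious)++ == 0 ? IRQ_HANDLED : IRQ_NONE;
}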
10ef9ab4 2069static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2070{
10ef9ab4 2071 struct be_eq_obj *eqo = dev;
6b7c5b94 2072
0b545a62
SP
2073 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2074 napi_schedule(&eqo->napi);
6b7c5b94
SP
2075 return IRQ_HANDLED;
2076}
2077
2e588f84 2078static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2079{
2e588f84 2080 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2081}
2082
10ef9ab4
SP
2083static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2084 int budget)
6b7c5b94 2085{
3abcdeda
SP
2086 struct be_adapter *adapter = rxo->adapter;
2087 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2088 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2089 u32 work_done;
2090
2091 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2092 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2093 if (!rxcp)
2094 break;
2095
12004ae9
SP
2096 /* Is it a flush compl that has no data? */
2097 if (unlikely(rxcp->num_rcvd == 0))
2098 goto loop_continue;
2099
2100 /* Discard a compl with partial DMA (seen on Lancer B0) */
2101 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2102 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2103 goto loop_continue;
2104 }
2105
2106 /* On BE drop pkts that arrive due to imperfect filtering in
2107 * promiscuous mode on some SKUs
2108 */
2109 if (unlikely(rxcp->port != adapter->port_num &&
2110 !lancer_chip(adapter))) {
10ef9ab4 2111 be_rx_compl_discard(rxo, rxcp);
12004ae9 2112 goto loop_continue;
64642811 2113 }
009dd872 2114
12004ae9 2115 if (do_gro(rxcp))
10ef9ab4 2116 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2117 else
10ef9ab4 2118 be_rx_compl_process(rxo, rxcp);
12004ae9 2119loop_continue:
2e588f84 2120 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2121 }
2122
10ef9ab4
SP
2123 if (work_done) {
2124 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2125
10ef9ab4
SP
2126 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2127 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2128 }
10ef9ab4 2129
6b7c5b94
SP
2130 return work_done;
2131}
2132
10ef9ab4
SP
2133static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2134 int budget, int idx)
6b7c5b94 2135{
6b7c5b94 2136 struct be_eth_tx_compl *txcp;
10ef9ab4 2137 int num_wrbs = 0, work_done;
3c8def97 2138
10ef9ab4
SP
2139 for (work_done = 0; work_done < budget; work_done++) {
2140 txcp = be_tx_compl_get(&txo->cq);
2141 if (!txcp)
2142 break;
2143 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2144 AMAP_GET_BITS(struct amap_eth_tx_compl,
2145 wrb_index, txcp));
10ef9ab4 2146 }
6b7c5b94 2147
10ef9ab4
SP
2148 if (work_done) {
2149 be_cq_notify(adapter, txo->cq.id, true, work_done);
2150 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2151
10ef9ab4
SP
2152 /* As Tx wrbs have been freed up, wake up netdev queue
2153 * if it was stopped due to lack of tx wrbs. */
2154 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2155 atomic_read(&txo->q.used) < txo->q.len / 2) {
2156 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2157 }
10ef9ab4
SP
2158
2159 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2160 tx_stats(txo)->tx_compl += work_done;
2161 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2162 }
10ef9ab4
SP
2163 return (work_done < budget); /* Done */
2164}
6b7c5b94 2165
10ef9ab4
SP
2166int be_poll(struct napi_struct *napi, int budget)
2167{
2168 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2169 struct be_adapter *adapter = eqo->adapter;
0b545a62 2170 int max_work = 0, work, i, num_evts;
10ef9ab4 2171 bool tx_done;
f31e50a8 2172
0b545a62
SP
2173 num_evts = events_get(eqo);
2174
10ef9ab4
SP
2175 /* Process all TXQs serviced by this EQ */
2176 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2177 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2178 eqo->tx_budget, i);
2179 if (!tx_done)
2180 max_work = budget;
f31e50a8
SP
2181 }
2182
10ef9ab4
SP
2183 /* This loop will iterate twice for EQ0 in which
2184 * completions of the last RXQ (default one) are also processed.
2185 * For other EQs the loop iterates only once
2186 */
2187 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2188 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2189 max_work = max(work, max_work);
2190 }
6b7c5b94 2191
10ef9ab4
SP
2192 if (is_mcc_eqo(eqo))
2193 be_process_mcc(adapter);
93c86700 2194
10ef9ab4
SP
2195 if (max_work < budget) {
2196 napi_complete(napi);
0b545a62 2197 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2198 } else {
2199 /* As we'll continue in polling mode, count and clear events */
0b545a62 2200 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2201 }
10ef9ab4 2202 return max_work;
6b7c5b94
SP
2203}
2204
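/* Illustrative skeleton (hypothetical, heavily reduced) of the NAPI
 * contract be_poll() above follows: consume at most "budget" work; if
 * less than the budget was used, complete NAPI and re-arm the EQ,
 * otherwise leave it unarmed and let the core poll again.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work = 0;	/* a real poller would process RX/TX here */

	if (work < budget)
		napi_complete(napi);	/* then re-arm the event queue */
	return work;
}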
f67ef7ba 2205void be_detect_error(struct be_adapter *adapter)
7c185276 2206{
e1cfb67a
PR
2207 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2208 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2209 u32 i;
2210
d23e946c 2211 if (be_hw_error(adapter))
72f02485
SP
2212 return;
2213
e1cfb67a
PR
2214 if (lancer_chip(adapter)) {
2215 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2216 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2217 sliport_err1 = ioread32(adapter->db +
2218 SLIPORT_ERROR1_OFFSET);
2219 sliport_err2 = ioread32(adapter->db +
2220 SLIPORT_ERROR2_OFFSET);
2221 }
2222 } else {
2223 pci_read_config_dword(adapter->pdev,
2224 PCICFG_UE_STATUS_LOW, &ue_lo);
2225 pci_read_config_dword(adapter->pdev,
2226 PCICFG_UE_STATUS_HIGH, &ue_hi);
2227 pci_read_config_dword(adapter->pdev,
2228 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2229 pci_read_config_dword(adapter->pdev,
2230 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2231
f67ef7ba
PR
2232 ue_lo = (ue_lo & ~ue_lo_mask);
2233 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2234 }
7c185276 2235
1451ae6e
AK
2236 /* On certain platforms BE hardware can indicate spurious UEs.
2237 * In case of a real UE the h/w will anyway stop working completely,
2238 * so hw_error is not set on UE detection.
2239 */
2240 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2241 adapter->hw_error = true;
434b3648 2242 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2243 "Error detected in the card\n");
2244 }
2245
2246 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2247 dev_err(&adapter->pdev->dev,
2248 "ERR: sliport status 0x%x\n", sliport_status);
2249 dev_err(&adapter->pdev->dev,
2250 "ERR: sliport error1 0x%x\n", sliport_err1);
2251 dev_err(&adapter->pdev->dev,
2252 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2253 }
2254
e1cfb67a
PR
2255 if (ue_lo) {
2256 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2257 if (ue_lo & 1)
7c185276
AK
2258 dev_err(&adapter->pdev->dev,
2259 "UE: %s bit set\n", ue_status_low_desc[i]);
2260 }
2261 }
f67ef7ba 2262
e1cfb67a
PR
2263 if (ue_hi) {
2264 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2265 if (ue_hi & 1)
7c185276
AK
2266 dev_err(&adapter->pdev->dev,
2267 "UE: %s bit set\n", ue_status_hi_desc[i]);
2268 }
2269 }
2270
2271}
2272
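/* Illustrative sketch (hypothetical helper) of the unmasked-UE walk
 * above: the mask registers carry 1s for bits FW wants ignored, so
 * "ue & ~mask" keeps only genuine errors, and each surviving bit indexes
 * its entry in the description table.
 */
static void example_print_ue_bits(u32 ue, u32 mask,
				  const char * const *desc)
{
	u32 i;

	for (ue &= ~mask, i = 0; ue; ue >>= 1, i++)
		if (ue & 1)
			pr_err("UE: %s bit set\n", desc[i]);
}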
8d56ff11
SP
2273static void be_msix_disable(struct be_adapter *adapter)
2274{
ac6a0c4a 2275 if (msix_enabled(adapter)) {
8d56ff11 2276 pci_disable_msix(adapter->pdev);
ac6a0c4a 2277 adapter->num_msix_vec = 0;
3abcdeda
SP
2278 }
2279}
2280
10ef9ab4
SP
2281static uint be_num_rss_want(struct be_adapter *adapter)
2282{
30e80b55 2283 u32 num = 0;
abb93951 2284
10ef9ab4 2285 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2286 (lancer_chip(adapter) ||
2287 (!sriov_want(adapter) && be_physfn(adapter)))) {
2288 num = adapter->max_rss_queues;
30e80b55
YM
2289 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2290 }
2291 return num;
10ef9ab4
SP
2292}
2293
6b7c5b94
SP
2294static void be_msix_enable(struct be_adapter *adapter)
2295{
10ef9ab4 2296#define BE_MIN_MSIX_VECTORS 1
045508a8 2297 int i, status, num_vec, num_roce_vec = 0;
d379142b 2298 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2299
10ef9ab4
SP
2300 /* If RSS queues are not used, need a vec for default RX Q */
2301 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2302 if (be_roce_supported(adapter)) {
2303 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2304 (num_online_cpus() + 1));
2305 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2306 num_vec += num_roce_vec;
2307 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2308 }
10ef9ab4 2309 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2310
ac6a0c4a 2311 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2312 adapter->msix_entries[i].entry = i;
2313
ac6a0c4a 2314 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2315 if (status == 0) {
2316 goto done;
2317 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2318 num_vec = status;
3abcdeda 2319 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2320 num_vec) == 0)
3abcdeda 2321 goto done;
3abcdeda 2322 }
d379142b
SP
2323
2324 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2325 return;
2326done:
045508a8
PP
2327 if (be_roce_supported(adapter)) {
2328 if (num_vec > num_roce_vec) {
2329 adapter->num_msix_vec = num_vec - num_roce_vec;
2330 adapter->num_msix_roce_vec =
2331 num_vec - adapter->num_msix_vec;
2332 } else {
2333 adapter->num_msix_vec = num_vec;
2334 adapter->num_msix_roce_vec = 0;
2335 }
2336 } else
2337 adapter->num_msix_vec = num_vec;
d379142b 2338 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2339 return;
6b7c5b94
SP
2340}
2341
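/* Illustrative sketch (hypothetical wrapper) of the legacy
 * pci_enable_msix() retry above: a positive return value is not success
 * but the number of vectors the platform can actually grant, so the call
 * is repeated once with that smaller count.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int want, int min_vecs)
{
	int rc = pci_enable_msix(pdev, entries, want);

	if (rc > 0 && rc >= min_vecs)	/* fewer on offer: retry once */
		rc = pci_enable_msix(pdev, entries, rc);
	return rc;			/* 0 means all vectors granted */
}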
fe6d2a38 2342static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2343 struct be_eq_obj *eqo)
b628bde2 2344{
10ef9ab4 2345 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2346}
6b7c5b94 2347
b628bde2
SP
2348static int be_msix_register(struct be_adapter *adapter)
2349{
10ef9ab4
SP
2350 struct net_device *netdev = adapter->netdev;
2351 struct be_eq_obj *eqo;
2352 int status, i, vec;
6b7c5b94 2353
10ef9ab4
SP
2354 for_all_evt_queues(adapter, eqo, i) {
2355 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2356 vec = be_msix_vec_get(adapter, eqo);
2357 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2358 if (status)
2359 goto err_msix;
2360 }
b628bde2 2361
6b7c5b94 2362 return 0;
3abcdeda 2363err_msix:
10ef9ab4
SP
2364 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2365 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2366 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2367 status);
ac6a0c4a 2368 be_msix_disable(adapter);
6b7c5b94
SP
2369 return status;
2370}
2371
2372static int be_irq_register(struct be_adapter *adapter)
2373{
2374 struct net_device *netdev = adapter->netdev;
2375 int status;
2376
ac6a0c4a 2377 if (msix_enabled(adapter)) {
6b7c5b94
SP
2378 status = be_msix_register(adapter);
2379 if (status == 0)
2380 goto done;
ba343c77
SB
2381 /* INTx is not supported for VF */
2382 if (!be_physfn(adapter))
2383 return status;
6b7c5b94
SP
2384 }
2385
e49cc34f 2386 /* INTx: only the first EQ is used */
6b7c5b94
SP
2387 netdev->irq = adapter->pdev->irq;
2388 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2389 &adapter->eq_obj[0]);
6b7c5b94
SP
2390 if (status) {
2391 dev_err(&adapter->pdev->dev,
2392 "INTx request IRQ failed - err %d\n", status);
2393 return status;
2394 }
2395done:
2396 adapter->isr_registered = true;
2397 return 0;
2398}
2399
2400static void be_irq_unregister(struct be_adapter *adapter)
2401{
2402 struct net_device *netdev = adapter->netdev;
10ef9ab4 2403 struct be_eq_obj *eqo;
3abcdeda 2404 int i;
6b7c5b94
SP
2405
2406 if (!adapter->isr_registered)
2407 return;
2408
2409 /* INTx */
ac6a0c4a 2410 if (!msix_enabled(adapter)) {
e49cc34f 2411 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2412 goto done;
2413 }
2414
2415 /* MSIx */
10ef9ab4
SP
2416 for_all_evt_queues(adapter, eqo, i)
2417 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2418
6b7c5b94
SP
2419done:
2420 adapter->isr_registered = false;
6b7c5b94
SP
2421}
2422
10ef9ab4 2423static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2424{
2425 struct be_queue_info *q;
2426 struct be_rx_obj *rxo;
2427 int i;
2428
2429 for_all_rx_queues(adapter, rxo, i) {
2430 q = &rxo->q;
2431 if (q->created) {
2432 be_cmd_rxq_destroy(adapter, q);
2433 /* After the rxq is invalidated, wait for a grace time
2434 * of 1ms for all dma to end and the flush compl to
2435 * arrive
2436 */
2437 mdelay(1);
10ef9ab4 2438 be_rx_cq_clean(rxo);
482c9e79 2439 }
10ef9ab4 2440 be_queue_free(adapter, q);
482c9e79
SP
2441 }
2442}
2443
889cd4b2
SP
2444static int be_close(struct net_device *netdev)
2445{
2446 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2447 struct be_eq_obj *eqo;
2448 int i;
889cd4b2 2449
045508a8
PP
2450 be_roce_dev_close(adapter);
2451
a323d9bf 2452 for_all_evt_queues(adapter, eqo, i)
10ef9ab4 2453 napi_disable(&eqo->napi);
a323d9bf
SP
2454
2455 be_async_mcc_disable(adapter);
2456
2457 /* Wait for all pending tx completions to arrive so that
2458 * all tx skbs are freed.
2459 */
2460 be_tx_compl_clean(adapter);
2461
2462 be_rx_qs_destroy(adapter);
2463
2464 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2465 if (msix_enabled(adapter))
2466 synchronize_irq(be_msix_vec_get(adapter, eqo));
2467 else
2468 synchronize_irq(netdev->irq);
2469 be_eq_clean(eqo);
63fcb27f
PR
2470 }
2471
889cd4b2
SP
2472 be_irq_unregister(adapter);
2473
482c9e79
SP
2474 return 0;
2475}
2476
10ef9ab4 2477static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2478{
2479 struct be_rx_obj *rxo;
e9008ee9
PR
2480 int rc, i, j;
2481 u8 rsstable[128];
482c9e79
SP
2482
2483 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2484 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2485 sizeof(struct be_eth_rx_d));
2486 if (rc)
2487 return rc;
2488 }
2489
2490 /* The FW would like the default RXQ to be created first */
2491 rxo = default_rxo(adapter);
2492 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2493 adapter->if_handle, false, &rxo->rss_id);
2494 if (rc)
2495 return rc;
2496
2497 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2498 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2499 rx_frag_size, adapter->if_handle,
2500 true, &rxo->rss_id);
482c9e79
SP
2501 if (rc)
2502 return rc;
2503 }
2504
2505 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2506 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2507 for_all_rss_queues(adapter, rxo, i) {
2508 if ((j + i) >= 128)
2509 break;
2510 rsstable[j + i] = rxo->rss_id;
2511 }
2512 }
594ad54a
SR
2513 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2514 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2515
2516 if (!BEx_chip(adapter))
2517 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2518 RSS_ENABLE_UDP_IPV6;
2519
2520 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2521 128);
2522 if (rc) {
2523 adapter->rss_flags = 0;
482c9e79 2524 return rc;
594ad54a 2525 }
482c9e79
SP
2526 }
2527
2528 /* First time posting */
10ef9ab4 2529 for_all_rx_queues(adapter, rxo, i)
482c9e79 2530 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2531 return 0;
2532}
2533
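/* Illustrative sketch (hypothetical helper) of the 128-entry RSS
 * indirection table fill above: ring ids are laid out round-robin so hash
 * buckets spread evenly over the rings, e.g. three rings yield the
 * repeating pattern id0, id1, id2, id0, ...
 */
static void example_fill_rss_table(u8 *table, int table_len,
				   const u8 *ring_ids, int num_rings)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = ring_ids[i % num_rings];
}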
6b7c5b94
SP
2534static int be_open(struct net_device *netdev)
2535{
2536 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2537 struct be_eq_obj *eqo;
3abcdeda 2538 struct be_rx_obj *rxo;
10ef9ab4 2539 struct be_tx_obj *txo;
b236916a 2540 u8 link_status;
3abcdeda 2541 int status, i;
5fb379ee 2542
10ef9ab4 2543 status = be_rx_qs_create(adapter);
482c9e79
SP
2544 if (status)
2545 goto err;
2546
5fb379ee
SP
2547 be_irq_register(adapter);
2548
10ef9ab4 2549 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2550 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2551
10ef9ab4
SP
2552 for_all_tx_queues(adapter, txo, i)
2553 be_cq_notify(adapter, txo->cq.id, true, 0);
2554
7a1e9b20
SP
2555 be_async_mcc_enable(adapter);
2556
10ef9ab4
SP
2557 for_all_evt_queues(adapter, eqo, i) {
2558 napi_enable(&eqo->napi);
2559 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2560 }
2561
323ff71e 2562 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2563 if (!status)
2564 be_link_status_update(adapter, link_status);
2565
045508a8 2566 be_roce_dev_open(adapter);
889cd4b2
SP
2567 return 0;
2568err:
2569 be_close(adapter->netdev);
2570 return -EIO;
5fb379ee
SP
2571}
2572
71d8d1b5
AK
2573static int be_setup_wol(struct be_adapter *adapter, bool enable)
2574{
2575 struct be_dma_mem cmd;
2576 int status = 0;
2577 u8 mac[ETH_ALEN];
2578
2579 memset(mac, 0, ETH_ALEN);
2580
2581 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2582 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2583 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2584 if (cmd.va == NULL)
2585 return -1;
71d8d1b5
AK
2586
2587 if (enable) {
2588 status = pci_write_config_dword(adapter->pdev,
2589 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2590 if (status) {
2591 dev_err(&adapter->pdev->dev,
2381a55c 2592 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2593 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2594 cmd.dma);
71d8d1b5
AK
2595 return status;
2596 }
2597 status = be_cmd_enable_magic_wol(adapter,
2598 adapter->netdev->dev_addr, &cmd);
2599 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2600 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2601 } else {
2602 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2603 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2604 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2605 }
2606
2b7bcebf 2607 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2608 return status;
2609}
2610
6d87f5c3
AK
2611/*
2612 * Generate a seed MAC address from the PF MAC address using jhash.
2613 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2614 * These addresses are programmed in the ASIC by the PF and the VF driver
2615 * queries for the MAC address during its probe.
2616 */
4c876616 2617static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2618{
f9449ab7 2619 u32 vf;
3abcdeda 2620 int status = 0;
6d87f5c3 2621 u8 mac[ETH_ALEN];
11ac75ed 2622 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2623
2624 be_vf_eth_addr_generate(adapter, mac);
2625
11ac75ed 2626 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2627 if (lancer_chip(adapter)) {
2628 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2629 } else {
2630 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2631 vf_cfg->if_handle,
2632 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2633 }
2634
6d87f5c3
AK
2635 if (status)
2636 dev_err(&adapter->pdev->dev,
590c391d 2637 "MAC address assignment failed for VF %d\n", vf);
6d87f5c3 2638 else
11ac75ed 2639 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2640
2641 mac[5] += 1;
2642 }
2643 return status;
2644}
2645
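/* Illustrative sketch (hypothetical helper) of the assignment scheme
 * above: each VF gets the jhash-derived seed MAC with the last octet
 * advanced per VF. Note that mac[5] is a single octet, so the scheme
 * assumes (safely, given the small VF counts here) far fewer than 256
 * VFs per seed.
 */
static void example_vf_mac_from_seed(const u8 *seed, int vf_num, u8 *mac)
{
	memcpy(mac, seed, ETH_ALEN);
	mac[5] += vf_num;	/* per-VF increment of the last octet */
}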
4c876616
SP
2646static int be_vfs_mac_query(struct be_adapter *adapter)
2647{
2648 int status, vf;
2649 u8 mac[ETH_ALEN];
2650 struct be_vf_cfg *vf_cfg;
2651 bool active;
2652
2653 for_all_vfs(adapter, vf_cfg, vf) {
2654 be_cmd_get_mac_from_list(adapter, mac, &active,
2655 &vf_cfg->pmac_id, 0);
2656
2657 status = be_cmd_mac_addr_query(adapter, mac, false,
2658 vf_cfg->if_handle, 0);
2659 if (status)
2660 return status;
2661 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2662 }
2663 return 0;
2664}
2665
f9449ab7 2666static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2667{
11ac75ed 2668 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2669 u32 vf;
2670
39f1d94d 2671 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2672 dev_warn(&adapter->pdev->dev,
2673 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2674 goto done;
2675 }
2676
11ac75ed 2677 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2678 if (lancer_chip(adapter))
2679 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2680 else
11ac75ed
SP
2681 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2682 vf_cfg->pmac_id, vf + 1);
f9449ab7 2683
11ac75ed
SP
2684 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2685 }
39f1d94d
SP
2686 pci_disable_sriov(adapter->pdev);
2687done:
2688 kfree(adapter->vf_cfg);
2689 adapter->num_vfs = 0;
6d87f5c3
AK
2690}
2691
a54769f5
SP
2692static int be_clear(struct be_adapter *adapter)
2693{
fbc13f01
AK
2694 int i = 1;
2695
191eb756
SP
2696 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2697 cancel_delayed_work_sync(&adapter->work);
2698 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2699 }
2700
11ac75ed 2701 if (sriov_enabled(adapter))
f9449ab7
SP
2702 be_vf_clear(adapter);
2703
fbc13f01
AK
2704 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2705 be_cmd_pmac_del(adapter, adapter->if_handle,
2706 adapter->pmac_id[i], 0);
2707
f9449ab7 2708 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2709
2710 be_mcc_queues_destroy(adapter);
10ef9ab4 2711 be_rx_cqs_destroy(adapter);
a54769f5 2712 be_tx_queues_destroy(adapter);
10ef9ab4 2713 be_evt_queues_destroy(adapter);
a54769f5 2714
abb93951
PR
2715 kfree(adapter->pmac_id);
2716 adapter->pmac_id = NULL;
2717
10ef9ab4 2718 be_msix_disable(adapter);
a54769f5
SP
2719 return 0;
2720}
2721
4c876616 2722static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2723{
4c876616
SP
2724 struct be_vf_cfg *vf_cfg;
2725 u32 cap_flags, en_flags, vf;
abb93951
PR
2726 int status;
2727
4c876616
SP
2728 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729 BE_IF_FLAGS_MULTICAST;
abb93951 2730
4c876616
SP
2731 for_all_vfs(adapter, vf_cfg, vf) {
2732 if (!BE3_chip(adapter))
a05f99db
VV
2733 be_cmd_get_profile_config(adapter, &cap_flags,
2734 NULL, vf + 1);
4c876616
SP
2735
2736 /* If a FW profile exists, then cap_flags are updated */
2737 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2738 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2739 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2740 &vf_cfg->if_handle, vf + 1);
2741 if (status)
2742 goto err;
2743 }
2744err:
2745 return status;
abb93951
PR
2746}
2747
39f1d94d 2748static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2749{
11ac75ed 2750 struct be_vf_cfg *vf_cfg;
30128031
SP
2751 int vf;
2752
39f1d94d
SP
2753 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2754 GFP_KERNEL);
2755 if (!adapter->vf_cfg)
2756 return -ENOMEM;
2757
11ac75ed
SP
2758 for_all_vfs(adapter, vf_cfg, vf) {
2759 vf_cfg->if_handle = -1;
2760 vf_cfg->pmac_id = -1;
30128031 2761 }
39f1d94d 2762 return 0;
30128031
SP
2763}
2764
f9449ab7
SP
2765static int be_vf_setup(struct be_adapter *adapter)
2766{
11ac75ed 2767 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2768 u16 def_vlan, lnk_speed;
4c876616
SP
2769 int status, old_vfs, vf;
2770 struct device *dev = &adapter->pdev->dev;
39f1d94d 2771
4c876616
SP
2772 old_vfs = be_find_vfs(adapter, ENABLED);
2773 if (old_vfs) {
2774 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2775 if (old_vfs != num_vfs)
2776 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2777 adapter->num_vfs = old_vfs;
39f1d94d 2778 } else {
4c876616
SP
2779 if (num_vfs > adapter->dev_num_vfs)
2780 dev_info(dev, "Device supports %d VFs and not %d\n",
2781 adapter->dev_num_vfs, num_vfs);
2782 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2783
2784 status = pci_enable_sriov(adapter->pdev, num_vfs);
2785 if (status) {
2786 dev_err(dev, "SRIOV enable failed\n");
2787 adapter->num_vfs = 0;
2788 return 0;
2789 }
39f1d94d
SP
2790 }
2791
2792 status = be_vf_setup_init(adapter);
2793 if (status)
2794 goto err;
30128031 2795
4c876616
SP
2796 if (old_vfs) {
2797 for_all_vfs(adapter, vf_cfg, vf) {
2798 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2799 if (status)
2800 goto err;
2801 }
2802 } else {
2803 status = be_vfs_if_create(adapter);
f9449ab7
SP
2804 if (status)
2805 goto err;
f9449ab7
SP
2806 }
2807
4c876616
SP
2808 if (old_vfs) {
2809 status = be_vfs_mac_query(adapter);
2810 if (status)
2811 goto err;
2812 } else {
39f1d94d
SP
2813 status = be_vf_eth_addr_config(adapter);
2814 if (status)
2815 goto err;
2816 }
f9449ab7 2817
11ac75ed 2818 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2819 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2820 * Allow full available bandwidth
2821 */
2822 if (BE3_chip(adapter) && !old_vfs)
2823 be_cmd_set_qos(adapter, 1000, vf+1);
2824
2825 status = be_cmd_link_status_query(adapter, &lnk_speed,
2826 NULL, vf + 1);
2827 if (!status)
2828 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2829
2830 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2831 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2832 if (status)
2833 goto err;
2834 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2835
2836 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7
SP
2837 }
2838 return 0;
2839err:
4c876616
SP
2840 dev_err(dev, "VF setup failed\n");
2841 be_vf_clear(adapter);
f9449ab7
SP
2842 return status;
2843}
2844
30128031
SP
2845static void be_setup_init(struct be_adapter *adapter)
2846{
2847 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2848 adapter->phy.link_speed = -1;
30128031
SP
2849 adapter->if_handle = -1;
2850 adapter->be3_native = false;
2851 adapter->promiscuous = false;
f25b119c
PR
2852 if (be_physfn(adapter))
2853 adapter->cmd_privileges = MAX_PRIVILEGES;
2854 else
2855 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2856}
2857
1578e777
PR
2858static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2859 bool *active_mac, u32 *pmac_id)
590c391d 2860{
1578e777 2861 int status = 0;
e5e1ee89 2862
1578e777
PR
2863 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2864 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2865 if (!lancer_chip(adapter) && !be_physfn(adapter))
2866 *active_mac = true;
2867 else
2868 *active_mac = false;
e5e1ee89 2869
1578e777
PR
2870 return status;
2871 }
e5e1ee89 2872
1578e777
PR
2873 if (lancer_chip(adapter)) {
2874 status = be_cmd_get_mac_from_list(adapter, mac,
2875 active_mac, pmac_id, 0);
2876 if (*active_mac) {
5ee4979b
SP
2877 status = be_cmd_mac_addr_query(adapter, mac, false,
2878 if_handle, *pmac_id);
1578e777
PR
2879 }
2880 } else if (be_physfn(adapter)) {
2881 /* For BE3, for PF get permanent MAC */
5ee4979b 2882 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2883 *active_mac = false;
e5e1ee89 2884 } else {
1578e777 2885 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 2886 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2887 if_handle, 0);
2888 *active_mac = true;
e5e1ee89 2889 }
590c391d
PR
2890 return status;
2891}
2892
abb93951
PR
2893static void be_get_resources(struct be_adapter *adapter)
2894{
4c876616
SP
2895 u16 dev_num_vfs;
2896 int pos, status;
abb93951 2897 bool profile_present = false;
a05f99db 2898 u16 txq_count = 0;
abb93951 2899
4c876616 2900 if (!BEx_chip(adapter)) {
abb93951 2901 status = be_cmd_get_func_config(adapter);
abb93951
PR
2902 if (!status)
2903 profile_present = true;
a05f99db
VV
2904 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2905 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
2906 }
2907
2908 if (profile_present) {
2909 /* Sanity fixes for Lancer */
2910 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2911 BE_UC_PMAC_COUNT);
2912 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2913 BE_NUM_VLANS_SUPPORTED);
2914 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2915 BE_MAX_MC);
2916 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2917 MAX_TX_QS);
2918 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2919 BE3_MAX_RSS_QS);
2920 adapter->max_event_queues = min_t(u16,
2921 adapter->max_event_queues,
2922 BE3_MAX_RSS_QS);
2923
2924 if (adapter->max_rss_queues &&
2925 adapter->max_rss_queues == adapter->max_rx_queues)
2926 adapter->max_rss_queues -= 1;
2927
2928 if (adapter->max_event_queues < adapter->max_rss_queues)
2929 adapter->max_rss_queues = adapter->max_event_queues;
2930
2931 } else {
2932 if (be_physfn(adapter))
2933 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2934 else
2935 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2936
2937 if (adapter->function_mode & FLEX10_MODE)
2938 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2939 else
2940 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2941
2942 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
2943 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
2944 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2945 MAX_TX_QS);
abb93951
PR
2946 adapter->max_rss_queues = (adapter->be3_native) ?
2947 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2948 adapter->max_event_queues = BE3_MAX_RSS_QS;
2949
2950 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2951 BE_IF_FLAGS_BROADCAST |
2952 BE_IF_FLAGS_MULTICAST |
2953 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2954 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2955 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2956 BE_IF_FLAGS_PROMISCUOUS;
2957
2958 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2959 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2960 }
4c876616
SP
2961
2962 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2963 if (pos) {
2964 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2965 &dev_num_vfs);
2966 if (BE3_chip(adapter))
2967 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2968 adapter->dev_num_vfs = dev_num_vfs;
2969 }
abb93951
PR
2970}
2971
39f1d94d
SP
2972/* Routine to query per function resource limits */
2973static int be_get_config(struct be_adapter *adapter)
2974{
4c876616 2975 int status;
39f1d94d 2976
abb93951
PR
2977 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2978 &adapter->function_mode,
0ad3157e
VV
2979 &adapter->function_caps,
2980 &adapter->asic_rev);
abb93951
PR
2981 if (status)
2982 goto err;
2983
2984 be_get_resources(adapter);
2985
2986 /* primary mac needs 1 pmac entry */
2987 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2988 sizeof(u32), GFP_KERNEL);
2989 if (!adapter->pmac_id) {
2990 status = -ENOMEM;
2991 goto err;
2992 }
2993
abb93951
PR
2994err:
2995 return status;
39f1d94d
SP
2996}
2997
5fb379ee
SP
2998static int be_setup(struct be_adapter *adapter)
2999{
39f1d94d 3000 struct device *dev = &adapter->pdev->dev;
abb93951 3001 u32 en_flags;
a54769f5 3002 u32 tx_fc, rx_fc;
10ef9ab4 3003 int status;
ba343c77 3004 u8 mac[ETH_ALEN];
1578e777 3005 bool active_mac;
ba343c77 3006
30128031 3007 be_setup_init(adapter);
6b7c5b94 3008
abb93951
PR
3009 if (!lancer_chip(adapter))
3010 be_cmd_req_native_mode(adapter);
39f1d94d 3011
abb93951
PR
3012 status = be_get_config(adapter);
3013 if (status)
3014 goto err;
73d540f2 3015
10ef9ab4
SP
3016 be_msix_enable(adapter);
3017
3018 status = be_evt_queues_create(adapter);
3019 if (status)
a54769f5 3020 goto err;
6b7c5b94 3021
10ef9ab4
SP
3022 status = be_tx_cqs_create(adapter);
3023 if (status)
3024 goto err;
3025
3026 status = be_rx_cqs_create(adapter);
3027 if (status)
a54769f5 3028 goto err;
6b7c5b94 3029
f9449ab7 3030 status = be_mcc_queues_create(adapter);
10ef9ab4 3031 if (status)
a54769f5 3032 goto err;
6b7c5b94 3033
f25b119c
PR
3034 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3035 /* In UMC mode FW does not return right privileges.
3036 * Override with correct privilege equivalent to PF.
3037 */
3038 if (be_is_mc(adapter))
3039 adapter->cmd_privileges = MAX_PRIVILEGES;
3040
f9449ab7
SP
3041 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3042 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3043
abb93951 3044 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3045 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3046
abb93951 3047 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3048
abb93951 3049 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3050 &adapter->if_handle, 0);
5fb379ee 3051 if (status != 0)
a54769f5 3052 goto err;
6b7c5b94 3053
1578e777
PR
3054 memset(mac, 0, ETH_ALEN);
3055 active_mac = false;
3056 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3057 &active_mac, &adapter->pmac_id[0]);
3058 if (status != 0)
3059 goto err;
3060
3061 if (!active_mac) {
3062 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3063 &adapter->pmac_id[0], 0);
3064 if (status != 0)
3065 goto err;
3066 }
3067
3068 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3069 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3070 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3071 }
0dffc83e 3072
10ef9ab4
SP
3073 status = be_tx_qs_create(adapter);
3074 if (status)
3075 goto err;
3076
04b71175 3077 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3078
1d1e9a46 3079 if (adapter->vlans_added)
10329df8 3080 be_vid_config(adapter);
7ab8b0b4 3081
a54769f5 3082 be_set_rx_mode(adapter->netdev);
5fb379ee 3083
ddc3f5cb 3084 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3085
ddc3f5cb
AK
3086 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3087 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3088 adapter->rx_fc);
2dc1deb6 3089
39f1d94d
SP
3090 if (be_physfn(adapter) && num_vfs) {
3091 if (adapter->dev_num_vfs)
3092 be_vf_setup(adapter);
3093 else
3094 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3095 }
3096
f25b119c
PR
3097 status = be_cmd_get_phy_info(adapter);
3098 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3099 adapter->phy.fc_autoneg = 1;
3100
191eb756
SP
3101 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3102 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3103 return 0;
a54769f5
SP
3104err:
3105 be_clear(adapter);
3106 return status;
3107}
6b7c5b94 3108
66268739
IV
3109#ifdef CONFIG_NET_POLL_CONTROLLER
3110static void be_netpoll(struct net_device *netdev)
3111{
3112 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3113 struct be_eq_obj *eqo;
66268739
IV
3114 int i;
3115
e49cc34f
SP
3116 for_all_evt_queues(adapter, eqo, i) {
3117 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3118 napi_schedule(&eqo->napi);
3119 }
10ef9ab4
SP
3120
3121 return;
66268739
IV
3122}
3123#endif
3124
84517482 3125#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3126char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3127
fa9a6fed 3128static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3129 const u8 *p, u32 img_start, int image_size,
3130 int hdr_size)
fa9a6fed
SB
3131{
3132 u32 crc_offset;
3133 u8 flashed_crc[4];
3134 int status;
3f0d4560
AK
3135
3136 crc_offset = hdr_size + img_start + image_size - 4;
3137
fa9a6fed 3138 p += crc_offset;
3f0d4560
AK
3139
3140 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3141 (image_size - 4));
fa9a6fed
SB
3142 if (status) {
3143 dev_err(&adapter->pdev->dev,
3144 "could not get crc from flash, not flashing redboot\n");
3145 return false;
3146 }
3147
3148 /* Update redboot only if the CRC does not match */
3149 if (!memcmp(flashed_crc, p, 4))
3150 return false;
3151 else
3152 return true;
fa9a6fed
SB
3153}
3154
306f1348
SP
3155static bool phy_flashing_required(struct be_adapter *adapter)
3156{
42f11cf2
AK
3157 return (adapter->phy.phy_type == TN_8022 &&
3158 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3159}
3160
c165541e
PR
3161static bool is_comp_in_ufi(struct be_adapter *adapter,
3162 struct flash_section_info *fsec, int type)
3163{
3164 int i = 0, img_type = 0;
3165 struct flash_section_info_g2 *fsec_g2 = NULL;
3166
ca34fe38 3167 if (BE2_chip(adapter))
c165541e
PR
3168 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3169
3170 for (i = 0; i < MAX_FLASH_COMP; i++) {
3171 if (fsec_g2)
3172 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3173 else
3174 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3175
3176 if (img_type == type)
3177 return true;
3178 }
3179 return false;
3180
3181}
3182
3183struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3184 int header_size,
3185 const struct firmware *fw)
3186{
3187 struct flash_section_info *fsec = NULL;
3188 const u8 *p = fw->data;
3189
3190 p += header_size;
3191 while (p < (fw->data + fw->size)) {
3192 fsec = (struct flash_section_info *)p;
3193 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3194 return fsec;
3195 p += 32;
3196 }
3197 return NULL;
3198}
3199
773a2d7c
PR
3200static int be_flash(struct be_adapter *adapter, const u8 *img,
3201 struct be_dma_mem *flash_cmd, int optype, int img_size)
3202{
3203 u32 total_bytes = 0, flash_op, num_bytes = 0;
3204 int status = 0;
3205 struct be_cmd_write_flashrom *req = flash_cmd->va;
3206
3207 total_bytes = img_size;
3208 while (total_bytes) {
3209 num_bytes = min_t(u32, 32*1024, total_bytes);
3210
3211 total_bytes -= num_bytes;
3212
3213 if (!total_bytes) {
3214 if (optype == OPTYPE_PHY_FW)
3215 flash_op = FLASHROM_OPER_PHY_FLASH;
3216 else
3217 flash_op = FLASHROM_OPER_FLASH;
3218 } else {
3219 if (optype == OPTYPE_PHY_FW)
3220 flash_op = FLASHROM_OPER_PHY_SAVE;
3221 else
3222 flash_op = FLASHROM_OPER_SAVE;
3223 }
3224
be716446 3225 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3226 img += num_bytes;
3227 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3228 flash_op, num_bytes);
3229 if (status) {
3230 if (status == ILLEGAL_IOCTL_REQ &&
3231 optype == OPTYPE_PHY_FW)
3232 break;
3233 dev_err(&adapter->pdev->dev,
3234 "cmd to write to flash rom failed.\n");
3235 return status;
3236 }
3237 }
3238 return 0;
3239}
3240
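/* Illustrative sketch (hypothetical callback form) of the chunking in
 * be_flash() above: the image is streamed in 32KB pieces, every chunk but
 * the last with a SAVE op; the final chunk issues the real FLASH op,
 * which commits the accumulated data to the part.
 */
static int example_flash_chunked(const u8 *img, u32 total,
				 int (*write_chunk)(const u8 *img,
						    u32 len, bool last))
{
	while (total) {
		u32 num = min_t(u32, 32 * 1024, total);

		total -= num;
		if (write_chunk(img, num, total == 0))
			return -EIO;	/* propagate write failure */
		img += num;
	}
	return 0;
}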
0ad3157e 3241/* For BE2, BE3 and BE3-R */
ca34fe38 3242static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3243 const struct firmware *fw,
3244 struct be_dma_mem *flash_cmd,
3245 int num_of_images)
3f0d4560 3246
84517482 3247{
3f0d4560 3248 int status = 0, i, filehdr_size = 0;
c165541e 3249 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3250 const u8 *p = fw->data;
215faf9c 3251 const struct flash_comp *pflashcomp;
773a2d7c 3252 int num_comp, redboot;
c165541e
PR
3253 struct flash_section_info *fsec = NULL;
3254
3255 struct flash_comp gen3_flash_types[] = {
3256 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3257 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3258 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3259 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3260 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3261 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3262 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3263 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3264 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3265 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3266 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3267 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3268 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3269 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3270 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3271 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3272 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3273 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3274 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3275 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3276 };
c165541e
PR
3277
3278 struct flash_comp gen2_flash_types[] = {
3279 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3280 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3281 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3282 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3283 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3284 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3285 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3286 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3287 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3288 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3289 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3290 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3291 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3292 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3293 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3294 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3295 };
3296
ca34fe38 3297 if (BE3_chip(adapter)) {
3f0d4560
AK
3298 pflashcomp = gen3_flash_types;
3299 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3300 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3301 } else {
3302 pflashcomp = gen2_flash_types;
3303 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3304 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3305 }
ca34fe38 3306
c165541e
PR
3307 /* Get flash section info*/
3308 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3309 if (!fsec) {
3310 dev_err(&adapter->pdev->dev,
3311 "Invalid Cookie. UFI corrupted ?\n");
3312 return -1;
3313 }
9fe96934 3314 for (i = 0; i < num_comp; i++) {
c165541e 3315 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3316 continue;
c165541e
PR
3317
3318 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3319 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3320 continue;
3321
773a2d7c
PR
3322 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3323 !phy_flashing_required(adapter))
306f1348 3324 continue;
c165541e 3325
773a2d7c
PR
3326 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3327 redboot = be_flash_redboot(adapter, fw->data,
3328 pflashcomp[i].offset, pflashcomp[i].size,
3329 filehdr_size + img_hdrs_size);
3330 if (!redboot)
3331 continue;
3332 }
c165541e 3333
3f0d4560 3334 p = fw->data;
c165541e 3335 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3336 if (p + pflashcomp[i].size > fw->data + fw->size)
3337 return -1;
773a2d7c
PR
3338
3339 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3340 pflashcomp[i].size);
3341 if (status) {
3342 dev_err(&adapter->pdev->dev,
3343 "Flashing section type %d failed.\n",
3344 pflashcomp[i].img_type);
3345 return status;
84517482 3346 }
84517482 3347 }
84517482
AK
3348 return 0;
3349}
3350
773a2d7c
PR
3351static int be_flash_skyhawk(struct be_adapter *adapter,
3352 const struct firmware *fw,
3353 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3354{
773a2d7c
PR
3355 int status = 0, i, filehdr_size = 0;
3356 int img_offset, img_size, img_optype, redboot;
3357 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3358 const u8 *p = fw->data;
3359 struct flash_section_info *fsec = NULL;
3360
3361 filehdr_size = sizeof(struct flash_file_hdr_g3);
3362 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3363 if (!fsec) {
3364 dev_err(&adapter->pdev->dev,
3365 "Invalid Cookie. UFI corrupted ?\n");
3366 return -1;
3367 }
3368
3369 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3370 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3371 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3372
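		/* Map the UFI section type to the op-type expected by
		 * be_flash(); unknown section types are skipped.
		 */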
3373 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3374 case IMAGE_FIRMWARE_iSCSI:
3375 img_optype = OPTYPE_ISCSI_ACTIVE;
3376 break;
3377 case IMAGE_BOOT_CODE:
3378 img_optype = OPTYPE_REDBOOT;
3379 break;
3380 case IMAGE_OPTION_ROM_ISCSI:
3381 img_optype = OPTYPE_BIOS;
3382 break;
3383 case IMAGE_OPTION_ROM_PXE:
3384 img_optype = OPTYPE_PXE_BIOS;
3385 break;
3386 case IMAGE_OPTION_ROM_FCoE:
3387 img_optype = OPTYPE_FCOE_BIOS;
3388 break;
3389 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3390 img_optype = OPTYPE_ISCSI_BACKUP;
3391 break;
3392 case IMAGE_NCSI:
3393 img_optype = OPTYPE_NCSI_FW;
3394 break;
3395 default:
3396 continue;
3397 }
3398
3399 if (img_optype == OPTYPE_REDBOOT) {
3400 redboot = be_flash_redboot(adapter, fw->data,
3401 img_offset, img_size,
3402 filehdr_size + img_hdrs_size);
3403 if (!redboot)
3404 continue;
3405 }
3406
3407 p = fw->data;
3408 p += filehdr_size + img_offset + img_hdrs_size;
3409 if (p + img_size > fw->data + fw->size)
3410 return -1;
3411
3412 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3413 if (status) {
3414 dev_err(&adapter->pdev->dev,
3415 "Flashing section type %d failed.\n",
3416 le32_to_cpu(fsec->fsec_entry[i].type));
3417 return status;
3418 }
3419 }
3420 return 0;
3f0d4560
AK
3421}
3422
f67ef7ba
PR
3423static int lancer_wait_idle(struct be_adapter *adapter)
3424{
3425#define SLIPORT_IDLE_TIMEOUT 30
3426 u32 reg_val;
3427 int status = 0, i;
3428
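	/* Poll once a second, for up to 30s, for the physdev-control
	 * "in progress" bit to clear.
	 */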
3429 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3430 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3431 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3432 break;
3433
3434 ssleep(1);
3435 }
3436
3437 if (i == SLIPORT_IDLE_TIMEOUT)
3438 status = -1;
3439
3440 return status;
3441}
3442
3443static int lancer_fw_reset(struct be_adapter *adapter)
3444{
3445 int status = 0;
3446
3447 status = lancer_wait_idle(adapter);
3448 if (status)
3449 return status;
3450
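	/* Request the firmware reset via the physical device control reg */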
3451 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3452 PHYSDEV_CONTROL_OFFSET);
3453
3454 return status;
3455}
3456
485bf569
SN
3457static int lancer_fw_download(struct be_adapter *adapter,
3458 const struct firmware *fw)
84517482 3459{
485bf569
SN
3460#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3461#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3462 struct be_dma_mem flash_cmd;
485bf569
SN
3463 const u8 *data_ptr = NULL;
3464 u8 *dest_image_ptr = NULL;
3465 size_t image_size = 0;
3466 u32 chunk_size = 0;
3467 u32 data_written = 0;
3468 u32 offset = 0;
3469 int status = 0;
3470 u8 add_status = 0;
f67ef7ba 3471 u8 change_status;
84517482 3472
485bf569 3473 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3474 dev_err(&adapter->pdev->dev,
485bf569
SN
3475 "FW Image not properly aligned. "
3476 "Length must be 4 byte aligned.\n");
3477 status = -EINVAL;
3478 goto lancer_fw_exit;
d9efd2af
SB
3479 }
3480
485bf569
SN
3481 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3482 + LANCER_FW_DOWNLOAD_CHUNK;
3483 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3484 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3485 if (!flash_cmd.va) {
3486 status = -ENOMEM;
485bf569
SN
3487 goto lancer_fw_exit;
3488 }
84517482 3489
485bf569
SN
3490 dest_image_ptr = flash_cmd.va +
3491 sizeof(struct lancer_cmd_req_write_object);
3492 image_size = fw->size;
3493 data_ptr = fw->data;
3494
3495 while (image_size) {
3496 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3497
3498 /* Copy the image chunk content. */
3499 memcpy(dest_image_ptr, data_ptr, chunk_size);
3500
3501 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3502 chunk_size, offset,
3503 LANCER_FW_DOWNLOAD_LOCATION,
3504 &data_written, &change_status,
3505 &add_status);
485bf569
SN
3506 if (status)
3507 break;
3508
3509 offset += data_written;
3510 data_ptr += data_written;
3511 image_size -= data_written;
3512 }
3513
3514 if (!status) {
3515 /* Commit the FW written */
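		/* (done by issuing a zero-length write at the final offset) */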
3516 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3517 0, offset,
3518 LANCER_FW_DOWNLOAD_LOCATION,
3519 &data_written, &change_status,
3520 &add_status);
485bf569
SN
3521 }
3522
3523 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3524 flash_cmd.dma);
3525 if (status) {
3526 dev_err(&adapter->pdev->dev,
3527 "Firmware load error. "
3528 "Status code: 0x%x Additional Status: 0x%x\n",
3529 status, add_status);
3530 goto lancer_fw_exit;
3531 }
3532
f67ef7ba
PR
3533 if (change_status == LANCER_FW_RESET_NEEDED) {
3534 status = lancer_fw_reset(adapter);
3535 if (status) {
3536 dev_err(&adapter->pdev->dev,
3537 "Adapter busy for FW reset.\n"
3538 "New FW will not be active.\n");
3539 goto lancer_fw_exit;
3540 }
3541 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3542 dev_err(&adapter->pdev->dev,
3543 "System reboot required for new FW"
3544 " to be active\n");
3545 }
3546
485bf569
SN
3547 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3548lancer_fw_exit:
3549 return status;
3550}
3551
ca34fe38
SP
3552#define UFI_TYPE2 2
3553#define UFI_TYPE3 3
0ad3157e 3554#define UFI_TYPE3R 10
ca34fe38
SP
3555#define UFI_TYPE4 4
3556static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3557 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3558{
3559 if (fhdr == NULL)
3560 goto be_get_ufi_exit;
3561
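	/* The first character of the build string in the UFI header
	 * identifies the chip generation the image was built for;
	 * asic_type_rev distinguishes BE3-R (0x10) from plain BE3.
	 */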
ca34fe38
SP
3562 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3563 return UFI_TYPE4;
0ad3157e
VV
3564 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3565 if (fhdr->asic_type_rev == 0x10)
3566 return UFI_TYPE3R;
3567 else
3568 return UFI_TYPE3;
3569 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3570 return UFI_TYPE2;
773a2d7c
PR
3571
3572be_get_ufi_exit:
3573 dev_err(&adapter->pdev->dev,
3574 "UFI and Interface are not compatible for flashing\n");
3575 return -1;
3576}
3577
485bf569
SN
3578static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3579{
485bf569
SN
3580 struct flash_file_hdr_g3 *fhdr3;
3581 struct image_hdr *img_hdr_ptr = NULL;
3582 struct be_dma_mem flash_cmd;
3583 const u8 *p;
773a2d7c 3584 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3585
be716446 3586 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3587 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3588 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3589 if (!flash_cmd.va) {
3590 status = -ENOMEM;
485bf569 3591 goto be_fw_exit;
84517482
AK
3592 }
3593
773a2d7c 3594 p = fw->data;
0ad3157e 3595 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3596
0ad3157e 3597 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3598
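	/* Walk the image headers; flashing is attempted only for the
	 * image with id 1 (apparently the primary NIC image).
	 */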
773a2d7c
PR
3599 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3600 for (i = 0; i < num_imgs; i++) {
3601 img_hdr_ptr = (struct image_hdr *)(fw->data +
3602 (sizeof(struct flash_file_hdr_g3) +
3603 i * sizeof(struct image_hdr)));
3604 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3605 switch (ufi_type) {
3606 case UFI_TYPE4:
773a2d7c
PR
3607 status = be_flash_skyhawk(adapter, fw,
3608 &flash_cmd, num_imgs);
0ad3157e
VV
3609 break;
3610 case UFI_TYPE3R:
ca34fe38
SP
3611 status = be_flash_BEx(adapter, fw, &flash_cmd,
3612 num_imgs);
0ad3157e
VV
3613 break;
3614 case UFI_TYPE3:
3615 /* Do not flash this UFI on BE3-R cards */
3616 if (adapter->asic_rev < 0x10)
3617 status = be_flash_BEx(adapter, fw,
3618 &flash_cmd,
3619 num_imgs);
3620 else {
3621 status = -1;
3622 dev_err(&adapter->pdev->dev,
3623 "Can't load BE3 UFI on BE3R\n");
3624 }
3625 }
3f0d4560 3626 }
773a2d7c
PR
3627 }
3628
ca34fe38
SP
3629 if (ufi_type == UFI_TYPE2)
3630 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3631 else if (ufi_type == -1)
3f0d4560 3632 status = -1;
84517482 3633
2b7bcebf
IV
3634 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3635 flash_cmd.dma);
84517482
AK
3636 if (status) {
3637 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3638 goto be_fw_exit;
84517482
AK
3639 }
3640
af901ca1 3641 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3642
485bf569
SN
3643be_fw_exit:
3644 return status;
3645}
3646
3647int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3648{
3649 const struct firmware *fw;
3650 int status;
3651
3652 if (!netif_running(adapter->netdev)) {
3653 dev_err(&adapter->pdev->dev,
3654 "Firmware load not allowed (interface is down)\n");
3655 return -1;
3656 }
3657
3658 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3659 if (status)
3660 goto fw_exit;
3661
3662 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3663
3664 if (lancer_chip(adapter))
3665 status = lancer_fw_download(adapter, fw);
3666 else
3667 status = be_fw_download(adapter, fw);
3668
84517482
AK
3669fw_exit:
3670 release_firmware(fw);
3671 return status;
3672}
3673
e5686ad8 3674static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3675 .ndo_open = be_open,
3676 .ndo_stop = be_close,
3677 .ndo_start_xmit = be_xmit,
a54769f5 3678 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3679 .ndo_set_mac_address = be_mac_addr_set,
3680 .ndo_change_mtu = be_change_mtu,
ab1594e9 3681 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3682 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3683 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3684 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3685 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3686 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3687 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3688 .ndo_get_vf_config = be_get_vf_config,
3689#ifdef CONFIG_NET_POLL_CONTROLLER
3690 .ndo_poll_controller = be_netpoll,
3691#endif
6b7c5b94
SP
3692};
3693
3694static void be_netdev_init(struct net_device *netdev)
3695{
3696 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3697 struct be_eq_obj *eqo;
3abcdeda 3698 int i;
6b7c5b94 3699
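	/* hw_features holds the ethtool-togglable features; VLAN RX
	 * offload and filtering are enabled in netdev->features only,
	 * so they stay always-on.
	 */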
6332c8d3 3700 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 3701 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 3702 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
3703 if (be_multi_rxq(adapter))
3704 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3705
3706 netdev->features |= netdev->hw_features |
f646968f 3707 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 3708
eb8a50d9 3709 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3710 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3711
fbc13f01
AK
3712 netdev->priv_flags |= IFF_UNICAST_FLT;
3713
6b7c5b94
SP
3714 netdev->flags |= IFF_MULTICAST;
3715
b7e5887e 3716 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3717
10ef9ab4 3718 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3719
3720 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3721
10ef9ab4
SP
3722 for_all_evt_queues(adapter, eqo, i)
3723 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3724}
3725
3726static void be_unmap_pci_bars(struct be_adapter *adapter)
3727{
c5b3ad4c
SP
3728 if (adapter->csr)
3729 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3730 if (adapter->db)
ce66f781 3731 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3732}
3733
ce66f781
SP
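/* Doorbell BAR: Lancer chips and VFs use BAR 0; PFs on other chips use BAR 4 */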
3734static int db_bar(struct be_adapter *adapter)
3735{
3736 if (lancer_chip(adapter) || !be_physfn(adapter))
3737 return 0;
3738 else
3739 return 4;
3740}
3741
3742static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3743{
dbf0f2a7 3744 if (skyhawk_chip(adapter)) {
ce66f781
SP
3745 adapter->roce_db.size = 4096;
3746 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3747 db_bar(adapter));
3748 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3749 db_bar(adapter));
3750 }
045508a8 3751 return 0;
6b7c5b94
SP
3752}
3753
3754static int be_map_pci_bars(struct be_adapter *adapter)
3755{
3756 u8 __iomem *addr;
ce66f781 3757 u32 sli_intf;
6b7c5b94 3758
ce66f781
SP
3759 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3760 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3761 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3762
c5b3ad4c
SP
3763 if (BEx_chip(adapter) && be_physfn(adapter)) {
3764 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3765 if (adapter->csr == NULL)
3766 return -ENOMEM;
3767 }
3768
ce66f781 3769 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3770 if (addr == NULL)
3771 goto pci_map_err;
ba343c77 3772 adapter->db = addr;
ce66f781
SP
3773
3774 be_roce_map_pci_bars(adapter);
6b7c5b94 3775 return 0;
ce66f781 3776
6b7c5b94
SP
3777pci_map_err:
3778 be_unmap_pci_bars(adapter);
3779 return -ENOMEM;
3780}
3781
6b7c5b94
SP
3782static void be_ctrl_cleanup(struct be_adapter *adapter)
3783{
8788fdc2 3784 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3785
3786 be_unmap_pci_bars(adapter);
3787
3788 if (mem->va)
2b7bcebf
IV
3789 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3790 mem->dma);
e7b909a6 3791
5b8821b7 3792 mem = &adapter->rx_filter;
e7b909a6 3793 if (mem->va)
2b7bcebf
IV
3794 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3795 mem->dma);
6b7c5b94
SP
3796}
3797
6b7c5b94
SP
3798static int be_ctrl_init(struct be_adapter *adapter)
3799{
8788fdc2
SP
3800 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3801 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3802 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 3803 u32 sli_intf;
6b7c5b94 3804 int status;
6b7c5b94 3805
ce66f781
SP
3806 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3807 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3808 SLI_INTF_FAMILY_SHIFT;
3809 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3810
6b7c5b94
SP
3811 status = be_map_pci_bars(adapter);
3812 if (status)
e7b909a6 3813 goto done;
6b7c5b94
SP
3814
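	/* The mailbox must be 16-byte aligned: over-allocate by 16 bytes
	 * and align the va/dma addresses by hand below.
	 */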
3815 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3816 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3817 mbox_mem_alloc->size,
3818 &mbox_mem_alloc->dma,
3819 GFP_KERNEL);
6b7c5b94 3820 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3821 status = -ENOMEM;
3822 goto unmap_pci_bars;
6b7c5b94
SP
3823 }
3824 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3825 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3826 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3827 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3828
5b8821b7
SP
3829 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3830 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
1f9061d2
JP
3831 &rx_filter->dma,
3832 GFP_KERNEL | __GFP_ZERO);
5b8821b7 3833 if (rx_filter->va == NULL) {
e7b909a6
SP
3834 status = -ENOMEM;
3835 goto free_mbox;
3836 }
1f9061d2 3837
2984961c 3838 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3839 spin_lock_init(&adapter->mcc_lock);
3840 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3841
dd131e76 3842 init_completion(&adapter->flash_compl);
cf588477 3843 pci_save_state(adapter->pdev);
6b7c5b94 3844 return 0;
e7b909a6
SP
3845
3846free_mbox:
2b7bcebf
IV
3847 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3848 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3849
3850unmap_pci_bars:
3851 be_unmap_pci_bars(adapter);
3852
3853done:
3854 return status;
6b7c5b94
SP
3855}
3856
3857static void be_stats_cleanup(struct be_adapter *adapter)
3858{
3abcdeda 3859 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3860
3861 if (cmd->va)
2b7bcebf
IV
3862 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3863 cmd->va, cmd->dma);
6b7c5b94
SP
3864}
3865
3866static int be_stats_init(struct be_adapter *adapter)
3867{
3abcdeda 3868 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3869
ca34fe38
SP
3870 if (lancer_chip(adapter))
3871 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3872 else if (BE2_chip(adapter))
89a88ab8 3873 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3874 else
3875 /* BE3 and Skyhawk */
3876 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3877
2b7bcebf 3878 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
1f9061d2 3879 GFP_KERNEL | __GFP_ZERO);
6b7c5b94
SP
3880 if (cmd->va == NULL)
3881 return -1;
3882 return 0;
3883}
3884
3bc6b06c 3885static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
3886{
3887 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3888
6b7c5b94
SP
3889 if (!adapter)
3890 return;
3891
045508a8 3892 be_roce_dev_remove(adapter);
8cef7a78 3893 be_intr_set(adapter, false);
045508a8 3894
f67ef7ba
PR
3895 cancel_delayed_work_sync(&adapter->func_recovery_work);
3896
6b7c5b94
SP
3897 unregister_netdev(adapter->netdev);
3898
5fb379ee
SP
3899 be_clear(adapter);
3900
bf99e50d
PR
3901 /* tell fw we're done with firing cmds */
3902 be_cmd_fw_clean(adapter);
3903
6b7c5b94
SP
3904 be_stats_cleanup(adapter);
3905
3906 be_ctrl_cleanup(adapter);
3907
d6b6d987
SP
3908 pci_disable_pcie_error_reporting(pdev);
3909
6b7c5b94
SP
3910 pci_set_drvdata(pdev, NULL);
3911 pci_release_regions(pdev);
3912 pci_disable_device(pdev);
3913
3914 free_netdev(adapter->netdev);
3915}
3916
4762f6ce
AK
3917bool be_is_wol_supported(struct be_adapter *adapter)
3918{
3919 return (adapter->wol_cap & BE_WOL_CAP) &&
3920 !be_is_wol_excluded(adapter);
3921}
3922
941a77d5
SK
3923u32 be_get_fw_log_level(struct be_adapter *adapter)
3924{
3925 struct be_dma_mem extfat_cmd;
3926 struct be_fat_conf_params *cfgs;
3927 int status;
3928 u32 level = 0;
3929 int j;
3930
f25b119c
PR
3931 if (lancer_chip(adapter))
3932 return 0;
3933
941a77d5
SK
3934 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3935 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3936 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3937 &extfat_cmd.dma);
3938
3939 if (!extfat_cmd.va) {
3940 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3941 __func__);
3942 goto err;
3943 }
3944
3945 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3946 if (!status) {
3947 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3948 sizeof(struct be_cmd_resp_hdr));
ac46a462 3949 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
3950 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3951 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3952 }
3953 }
3954 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3955 extfat_cmd.dma);
3956err:
3957 return level;
3958}
abb93951 3959
39f1d94d 3960static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 3961{
6b7c5b94 3962 int status;
941a77d5 3963 u32 level;
6b7c5b94 3964
9e1453c5
AK
3965 status = be_cmd_get_cntl_attributes(adapter);
3966 if (status)
3967 return status;
3968
4762f6ce
AK
3969 status = be_cmd_get_acpi_wol_cap(adapter);
3970 if (status) {
3971 /* In case of a failure to get WOL capabilities,
3972 * check the exclusion list to determine WOL capability */
3973 if (!be_is_wol_excluded(adapter))
3974 adapter->wol_cap |= BE_WOL_CAP;
3975 }
3976
3977 if (be_is_wol_supported(adapter))
3978 adapter->wol = true;
3979
7aeb2156
PR
3980 /* Must be a power of 2 or else MODULO will BUG_ON */
3981 adapter->be_get_temp_freq = 64;
3982
941a77d5
SK
3983 level = be_get_fw_log_level(adapter);
3984 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3985
2243e2e9 3986 return 0;
6b7c5b94
SP
3987}
3988
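/* Recover a Lancer function after a firmware error: wait for the port to
 * report ready again, then tear down and rebuild the function state.
 */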
f67ef7ba 3989static int lancer_recover_func(struct be_adapter *adapter)
d8110f62
PR
3990{
3991 int status;
d8110f62 3992
f67ef7ba
PR
3993 status = lancer_test_and_set_rdy_state(adapter);
3994 if (status)
3995 goto err;
d8110f62 3996
f67ef7ba
PR
3997 if (netif_running(adapter->netdev))
3998 be_close(adapter->netdev);
d8110f62 3999
f67ef7ba
PR
4000 be_clear(adapter);
4001
4002 adapter->hw_error = false;
4003 adapter->fw_timeout = false;
4004
4005 status = be_setup(adapter);
4006 if (status)
4007 goto err;
d8110f62 4008
f67ef7ba
PR
4009 if (netif_running(adapter->netdev)) {
4010 status = be_open(adapter->netdev);
d8110f62
PR
4011 if (status)
4012 goto err;
f67ef7ba 4013 }
d8110f62 4014
f67ef7ba
PR
4015 dev_info(&adapter->pdev->dev,
4016 "Adapter SLIPORT recovery succeeded\n");
4017 return 0;
4018err:
67297ad8
PR
4019 if (adapter->eeh_error)
4020 dev_err(&adapter->pdev->dev,
4021 "Adapter SLIPORT recovery failed\n");
d8110f62 4022
f67ef7ba
PR
4023 return status;
4024}
4025
4026static void be_func_recovery_task(struct work_struct *work)
4027{
4028 struct be_adapter *adapter =
4029 container_of(work, struct be_adapter, func_recovery_work.work);
4030 int status;
d8110f62 4031
f67ef7ba 4032 be_detect_error(adapter);
d8110f62 4033
f67ef7ba 4034 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4035
f67ef7ba
PR
4036 if (adapter->eeh_error)
4037 goto out;
d8110f62 4038
f67ef7ba
PR
4039 rtnl_lock();
4040 netif_device_detach(adapter->netdev);
4041 rtnl_unlock();
d8110f62 4042
f67ef7ba 4043 status = lancer_recover_func(adapter);
d8110f62 4044
f67ef7ba
PR
4045 if (!status)
4046 netif_device_attach(adapter->netdev);
d8110f62 4047 }
f67ef7ba
PR
4048
4049out:
4050 schedule_delayed_work(&adapter->func_recovery_work,
4051 msecs_to_jiffies(1000));
d8110f62
PR
4052}
4053
4054static void be_worker(struct work_struct *work)
4055{
4056 struct be_adapter *adapter =
4057 container_of(work, struct be_adapter, work.work);
4058 struct be_rx_obj *rxo;
10ef9ab4 4059 struct be_eq_obj *eqo;
d8110f62
PR
4060 int i;
4061
d8110f62
PR
4062 /* When interrupts are not yet enabled, just reap any pending
4063 * MCC completions */
4064 if (!netif_running(adapter->netdev)) {
072a9c48 4065 local_bh_disable();
10ef9ab4 4066 be_process_mcc(adapter);
072a9c48 4067 local_bh_enable();
d8110f62
PR
4068 goto reschedule;
4069 }
4070
4071 if (!adapter->stats_cmd_sent) {
4072 if (lancer_chip(adapter))
4073 lancer_cmd_get_pport_stats(adapter,
4074 &adapter->stats_cmd);
4075 else
4076 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4077 }
4078
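	/* Read the die temperature once every be_get_temp_freq (64) passes */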
7aeb2156
PR
4079 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4080 be_cmd_get_die_temperature(adapter);
4081
d8110f62 4082 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
4083 if (rxo->rx_post_starved) {
4084 rxo->rx_post_starved = false;
4085 be_post_rx_frags(rxo, GFP_KERNEL);
4086 }
4087 }
4088
10ef9ab4
SP
4089 for_all_evt_queues(adapter, eqo, i)
4090 be_eqd_update(adapter, eqo);
4091
d8110f62
PR
4092reschedule:
4093 adapter->work_counter++;
4094 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4095}
4096
39f1d94d
SP
4097static bool be_reset_required(struct be_adapter *adapter)
4098{
d79c0a20 4099 return be_find_vfs(adapter, ENABLED) <= 0;
39f1d94d
SP
4100}
4101
d379142b
SP
4102static char *mc_name(struct be_adapter *adapter)
4103{
4104 if (adapter->function_mode & FLEX10_MODE)
4105 return "FLEX10";
4106 else if (adapter->function_mode & VNIC_MODE)
4107 return "vNIC";
4108 else if (adapter->function_mode & UMC_ENABLED)
4109 return "UMC";
4110 else
4111 return "";
4112}
4113
4114static inline char *func_name(struct be_adapter *adapter)
4115{
4116 return be_physfn(adapter) ? "PF" : "VF";
4117}
4118
1dd06ae8 4119static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4120{
4121 int status = 0;
4122 struct be_adapter *adapter;
4123 struct net_device *netdev;
b4e32a71 4124 char port_name;
6b7c5b94
SP
4125
4126 status = pci_enable_device(pdev);
4127 if (status)
4128 goto do_none;
4129
4130 status = pci_request_regions(pdev, DRV_NAME);
4131 if (status)
4132 goto disable_dev;
4133 pci_set_master(pdev);
4134
7f640062 4135 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4136 if (netdev == NULL) {
4137 status = -ENOMEM;
4138 goto rel_reg;
4139 }
4140 adapter = netdev_priv(netdev);
4141 adapter->pdev = pdev;
4142 pci_set_drvdata(pdev, adapter);
4143 adapter->netdev = netdev;
2243e2e9 4144 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4145
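	/* Prefer 64-bit DMA; fall back to a 32-bit mask if the host
	 * cannot support it.
	 */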
2b7bcebf 4146 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94 4147 if (!status) {
2bd92cd2
CH
4148 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4149 if (status < 0) {
4150 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4151 goto free_netdev;
4152 }
6b7c5b94
SP
4153 netdev->features |= NETIF_F_HIGHDMA;
4154 } else {
2b7bcebf 4155 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4156 if (status) {
4157 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4158 goto free_netdev;
4159 }
4160 }
4161
d6b6d987
SP
4162 status = pci_enable_pcie_error_reporting(pdev);
4163 if (status)
4164 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4165
6b7c5b94
SP
4166 status = be_ctrl_init(adapter);
4167 if (status)
39f1d94d 4168 goto free_netdev;
6b7c5b94 4169
2243e2e9 4170 /* sync up with fw's ready state */
ba343c77 4171 if (be_physfn(adapter)) {
bf99e50d 4172 status = be_fw_wait_ready(adapter);
ba343c77
SB
4173 if (status)
4174 goto ctrl_clean;
ba343c77 4175 }
6b7c5b94 4176
39f1d94d
SP
4177 if (be_reset_required(adapter)) {
4178 status = be_cmd_reset_function(adapter);
4179 if (status)
4180 goto ctrl_clean;
556ae191 4181
2d177be8
KA
4182 /* Wait for interrupts to quiesce after an FLR */
4183 msleep(100);
4184 }
8cef7a78
SK
4185
4186 /* Allow interrupts for other ULPs running on NIC function */
4187 be_intr_set(adapter, true);
10ef9ab4 4188
2d177be8
KA
4189 /* tell fw we're ready to fire cmds */
4190 status = be_cmd_fw_init(adapter);
4191 if (status)
4192 goto ctrl_clean;
4193
2243e2e9
SP
4194 status = be_stats_init(adapter);
4195 if (status)
4196 goto ctrl_clean;
4197
39f1d94d 4198 status = be_get_initial_config(adapter);
6b7c5b94
SP
4199 if (status)
4200 goto stats_clean;
6b7c5b94
SP
4201
4202 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4203 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4204 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4205
5fb379ee
SP
4206 status = be_setup(adapter);
4207 if (status)
55f5c3c5 4208 goto stats_clean;
2243e2e9 4209
3abcdeda 4210 be_netdev_init(netdev);
6b7c5b94
SP
4211 status = register_netdev(netdev);
4212 if (status != 0)
5fb379ee 4213 goto unsetup;
6b7c5b94 4214
045508a8
PP
4215 be_roce_dev_add(adapter);
4216
f67ef7ba
PR
4217 schedule_delayed_work(&adapter->func_recovery_work,
4218 msecs_to_jiffies(1000));
b4e32a71
PR
4219
4220 be_cmd_query_port_name(adapter, &port_name);
4221
d379142b
SP
4222 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4223 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4224
6b7c5b94
SP
4225 return 0;
4226
5fb379ee
SP
4227unsetup:
4228 be_clear(adapter);
6b7c5b94
SP
4229stats_clean:
4230 be_stats_cleanup(adapter);
4231ctrl_clean:
4232 be_ctrl_cleanup(adapter);
f9449ab7 4233free_netdev:
fe6d2a38 4234 free_netdev(netdev);
8d56ff11 4235 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4236rel_reg:
4237 pci_release_regions(pdev);
4238disable_dev:
4239 pci_disable_device(pdev);
4240do_none:
c4ca2374 4241 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4242 return status;
4243}
4244
4245static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4246{
4247 struct be_adapter *adapter = pci_get_drvdata(pdev);
4248 struct net_device *netdev = adapter->netdev;
4249
71d8d1b5
AK
4250 if (adapter->wol)
4251 be_setup_wol(adapter, true);
4252
f67ef7ba
PR
4253 cancel_delayed_work_sync(&adapter->func_recovery_work);
4254
6b7c5b94
SP
4255 netif_device_detach(netdev);
4256 if (netif_running(netdev)) {
4257 rtnl_lock();
4258 be_close(netdev);
4259 rtnl_unlock();
4260 }
9b0365f1 4261 be_clear(adapter);
6b7c5b94
SP
4262
4263 pci_save_state(pdev);
4264 pci_disable_device(pdev);
4265 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4266 return 0;
4267}
4268
4269static int be_resume(struct pci_dev *pdev)
4270{
4271 int status = 0;
4272 struct be_adapter *adapter = pci_get_drvdata(pdev);
4273 struct net_device *netdev = adapter->netdev;
4274
4275 netif_device_detach(netdev);
4276
4277 status = pci_enable_device(pdev);
4278 if (status)
4279 return status;
4280
4281 pci_set_power_state(pdev, 0);
4282 pci_restore_state(pdev);
4283
2243e2e9
SP
4284 /* tell fw we're ready to fire cmds */
4285 status = be_cmd_fw_init(adapter);
4286 if (status)
4287 return status;
4288
9b0365f1 4289 be_setup(adapter);
6b7c5b94
SP
4290 if (netif_running(netdev)) {
4291 rtnl_lock();
4292 be_open(netdev);
4293 rtnl_unlock();
4294 }
f67ef7ba
PR
4295
4296 schedule_delayed_work(&adapter->func_recovery_work,
4297 msecs_to_jiffies(1000));
6b7c5b94 4298 netif_device_attach(netdev);
71d8d1b5
AK
4299
4300 if (adapter->wol)
4301 be_setup_wol(adapter, false);
a4ca055f 4302
6b7c5b94
SP
4303 return 0;
4304}
4305
82456b03
SP
4306/*
4307 * An FLR will stop BE from DMAing any data.
4308 */
4309static void be_shutdown(struct pci_dev *pdev)
4310{
4311 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4312
2d5d4154
AK
4313 if (!adapter)
4314 return;
82456b03 4315
0f4a6828 4316 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4317 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4318
2d5d4154 4319 netif_device_detach(adapter->netdev);
82456b03 4320
57841869
AK
4321 be_cmd_reset_function(adapter);
4322
82456b03 4323 pci_disable_device(pdev);
82456b03
SP
4324}
4325
cf588477
SP
4326static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4327 pci_channel_state_t state)
4328{
4329 struct be_adapter *adapter = pci_get_drvdata(pdev);
4330 struct net_device *netdev = adapter->netdev;
4331
4332 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4333
f67ef7ba
PR
4334 adapter->eeh_error = true;
4335
4336 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4337
f67ef7ba 4338 rtnl_lock();
cf588477 4339 netif_device_detach(netdev);
f67ef7ba 4340 rtnl_unlock();
cf588477
SP
4341
4342 if (netif_running(netdev)) {
4343 rtnl_lock();
4344 be_close(netdev);
4345 rtnl_unlock();
4346 }
4347 be_clear(adapter);
4348
4349 if (state == pci_channel_io_perm_failure)
4350 return PCI_ERS_RESULT_DISCONNECT;
4351
4352 pci_disable_device(pdev);
4353
eeb7fc7b
SK
4354 /* The error could cause the FW to trigger a flash debug dump.
4355 * Resetting the card while flash dump is in progress
c8a54163
PR
4356 * can cause it not to recover; wait for it to finish.
4357 * Wait only for first function as it is needed only once per
4358 * adapter.
eeb7fc7b 4359 */
c8a54163
PR
4360 if (pdev->devfn == 0)
4361 ssleep(30);
4362
cf588477
SP
4363 return PCI_ERS_RESULT_NEED_RESET;
4364}
4365
4366static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4367{
4368 struct be_adapter *adapter = pci_get_drvdata(pdev);
4369 int status;
4370
4371 dev_info(&adapter->pdev->dev, "EEH reset\n");
f67ef7ba 4372 be_clear_all_error(adapter);
cf588477
SP
4373
4374 status = pci_enable_device(pdev);
4375 if (status)
4376 return PCI_ERS_RESULT_DISCONNECT;
4377
4378 pci_set_master(pdev);
4379 pci_set_power_state(pdev, 0);
4380 pci_restore_state(pdev);
4381
4382 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4383 dev_info(&adapter->pdev->dev,
4384 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4385 status = be_fw_wait_ready(adapter);
cf588477
SP
4386 if (status)
4387 return PCI_ERS_RESULT_DISCONNECT;
4388
d6b6d987 4389 pci_cleanup_aer_uncorrect_error_status(pdev);
cf588477
SP
4390 return PCI_ERS_RESULT_RECOVERED;
4391}
4392
4393static void be_eeh_resume(struct pci_dev *pdev)
4394{
4395 int status = 0;
4396 struct be_adapter *adapter = pci_get_drvdata(pdev);
4397 struct net_device *netdev = adapter->netdev;
4398
4399 dev_info(&adapter->pdev->dev, "EEH resume\n");
4400
4401 pci_save_state(pdev);
4402
2d177be8 4403 status = be_cmd_reset_function(adapter);
cf588477
SP
4404 if (status)
4405 goto err;
4406
2d177be8
KA
4407 /* tell fw we're ready to fire cmds */
4408 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4409 if (status)
4410 goto err;
4411
cf588477
SP
4412 status = be_setup(adapter);
4413 if (status)
4414 goto err;
4415
4416 if (netif_running(netdev)) {
4417 status = be_open(netdev);
4418 if (status)
4419 goto err;
4420 }
f67ef7ba
PR
4421
4422 schedule_delayed_work(&adapter->func_recovery_work,
4423 msecs_to_jiffies(1000));
cf588477
SP
4424 netif_device_attach(netdev);
4425 return;
4426err:
4427 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4428}
4429
3646f0e5 4430static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4431 .error_detected = be_eeh_err_detected,
4432 .slot_reset = be_eeh_reset,
4433 .resume = be_eeh_resume,
4434};
4435
6b7c5b94
SP
4436static struct pci_driver be_driver = {
4437 .name = DRV_NAME,
4438 .id_table = be_dev_ids,
4439 .probe = be_probe,
4440 .remove = be_remove,
4441 .suspend = be_suspend,
cf588477 4442 .resume = be_resume,
82456b03 4443 .shutdown = be_shutdown,
cf588477 4444 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4445};
4446
4447static int __init be_init_module(void)
4448{
8e95a202
JP
4449 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4450 rx_frag_size != 2048) {
6b7c5b94
SP
4451 printk(KERN_WARNING DRV_NAME
4452 " : Module param rx_frag_size must be 2048/4096/8192."
4453 " Using 2048\n");
4454 rx_frag_size = 2048;
4455 }
6b7c5b94
SP
4456
4457 return pci_register_driver(&be_driver);
4458}
4459module_init(be_init_module);
4460
4461static void __exit be_exit_module(void)
4462{
4463 pci_unregister_driver(&be_driver);
4464}
4465module_exit(be_exit_module);