/* drivers/net/ethernet/emulex/benet/be_main.c */
6b7c5b94 1/*
c7bb15a6 2 * Copyright (C) 2005 - 2013 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
6b7c5b94
SP
24
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 28MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
29MODULE_LICENSE("GPL");
30
ba343c77 31static unsigned int num_vfs;
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
ba343c77 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 34
11ac75ed
SP
35static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
6b7c5b94 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
48 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: one label per bit position.
 * Trailing spaces in some entries are preserved verbatim from the
 * original table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: one label per bit position. */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 121
752961a1
SP
122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
6b7c5b94
SP
129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 132 if (mem->va) {
2b7bcebf
IV
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
1cfafab9
SP
135 mem->va = NULL;
136 }
6b7c5b94
SP
137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
2b7bcebf 148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
1f9061d2 149 GFP_KERNEL | __GFP_ZERO);
6b7c5b94 150 if (!mem->va)
10ef9ab4 151 return -ENOMEM;
6b7c5b94
SP
152 return 0;
153}
154
68c45a2d 155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 156{
db3ea781 157 u32 reg, enabled;
5f0b849e 158
db3ea781
SP
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
5f0b849e 163 if (!enabled && enable)
6b7c5b94 164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else if (enabled && !enable)
6b7c5b94 166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 167 else
6b7c5b94 168 return;
5f0b849e 169
db3ea781
SP
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
172}
173
68c45a2d
SK
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
8788fdc2 190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
195
196 wmb();
8788fdc2 197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
198}
199
94d73aaa
VV
200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
6b7c5b94
SP
202{
203 u32 val = 0;
94d73aaa 204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
206
207 wmb();
94d73aaa 208 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
209}
210
8788fdc2 211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 218
f67ef7ba 219 if (adapter->eeh_error)
cf588477
SP
220 return;
221
6b7c5b94
SP
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
229}
230
8788fdc2 231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 237
f67ef7ba 238 if (adapter->eeh_error)
cf588477
SP
239 return;
240
6b7c5b94
SP
241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
245}
246
6b7c5b94
SP
247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
250 struct sockaddr *addr = p;
251 int status = 0;
e3a7ae2c 252 u8 current_mac[ETH_ALEN];
fbc13f01 253 u32 pmac_id = adapter->pmac_id[0];
704e4c88 254 bool active_mac = true;
6b7c5b94 255
ca9e4988
AK
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
704e4c88
PR
259 /* For BE VF, MAC address is already activated by PF.
260 * Hence only operation left is updating netdev->devaddr.
261 * Update it if user is passing the same MAC which was used
262 * during configuring VF MAC from PF(Hypervisor).
263 */
264 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265 status = be_cmd_mac_addr_query(adapter, current_mac,
266 false, adapter->if_handle, 0);
267 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268 goto done;
269 else
270 goto err;
271 }
272
273 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274 goto done;
275
276 /* For Lancer check if any MAC is active.
277 * If active, get its mac id.
278 */
279 if (lancer_chip(adapter) && !be_physfn(adapter))
280 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281 &pmac_id, 0);
282
283 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284 adapter->if_handle,
285 &adapter->pmac_id[0], 0);
286
a65027e4 287 if (status)
e3a7ae2c 288 goto err;
6b7c5b94 289
704e4c88
PR
290 if (active_mac)
291 be_cmd_pmac_del(adapter, adapter->if_handle,
292 pmac_id, 0);
293done:
e3a7ae2c
SK
294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295 return 0;
296err:
297 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
6b7c5b94
SP
298 return status;
299}
300
ca34fe38
SP
301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
329static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 330{
ac124ff9
SP
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 334 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 337
ac124ff9 338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
89a88ab8
AK
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
ac124ff9 366 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 367 else
ac124ff9 368 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376}
377
ca34fe38 378static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 379{
ac124ff9
SP
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 383 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 386
ac124ff9 387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
ac124ff9 410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
005d5696
SX
424static void populate_lancer_stats(struct be_adapter *adapter)
425{
89a88ab8 426
005d5696 427 struct be_drv_stats *drvs = &adapter->drv_stats;
ac124ff9
SP
428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
ac124ff9 452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 456 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 459 drvs->rx_drops_too_many_frags =
ac124ff9 460 pport_stats->rx_drops_too_many_frags_lo;
005d5696 461}
89a88ab8 462
09c1c68f
SP
463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
a6c578ef
AK
475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
89a88ab8
AK
489void be_parse_stats(struct be_adapter *adapter)
490{
ac124ff9
SP
491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
a6c578ef 494 u32 erx_stat;
ac124ff9 495
ca34fe38
SP
496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
005d5696 498 } else {
ca34fe38
SP
499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
d51ebd33 504
ca34fe38
SP
505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 509 }
09c1c68f 510 }
89a88ab8
AK
511}
512
ab1594e9
SP
513static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514 struct rtnl_link_stats64 *stats)
6b7c5b94 515{
ab1594e9 516 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 517 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 518 struct be_rx_obj *rxo;
3c8def97 519 struct be_tx_obj *txo;
ab1594e9
SP
520 u64 pkts, bytes;
521 unsigned int start;
3abcdeda 522 int i;
6b7c5b94 523
3abcdeda 524 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
525 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526 do {
527 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528 pkts = rx_stats(rxo)->rx_pkts;
529 bytes = rx_stats(rxo)->rx_bytes;
530 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531 stats->rx_packets += pkts;
532 stats->rx_bytes += bytes;
533 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
536 }
537
3c8def97 538 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
539 const struct be_tx_stats *tx_stats = tx_stats(txo);
540 do {
541 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542 pkts = tx_stats(txo)->tx_pkts;
543 bytes = tx_stats(txo)->tx_bytes;
544 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545 stats->tx_packets += pkts;
546 stats->tx_bytes += bytes;
3c8def97 547 }
6b7c5b94
SP
548
549 /* bad pkts received */
ab1594e9 550 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
551 drvs->rx_alignment_symbol_errors +
552 drvs->rx_in_range_errors +
553 drvs->rx_out_range_errors +
554 drvs->rx_frame_too_long +
555 drvs->rx_dropped_too_small +
556 drvs->rx_dropped_too_short +
557 drvs->rx_dropped_header_too_small +
558 drvs->rx_dropped_tcp_length +
ab1594e9 559 drvs->rx_dropped_runt;
68110868 560
6b7c5b94 561 /* detailed rx errors */
ab1594e9 562 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
563 drvs->rx_out_range_errors +
564 drvs->rx_frame_too_long;
68110868 565
ab1594e9 566 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
567
568 /* frame alignment errors */
ab1594e9 569 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 570
6b7c5b94
SP
571 /* receiver fifo overrun */
572 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 573 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
574 drvs->rx_input_fifo_overflow_drop +
575 drvs->rx_drops_no_pbuf;
ab1594e9 576 return stats;
6b7c5b94
SP
577}
578
b236916a 579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 580{
6b7c5b94
SP
581 struct net_device *netdev = adapter->netdev;
582
b236916a 583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 584 netif_carrier_off(netdev);
b236916a 585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 586 }
b236916a
AK
587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
6b7c5b94
SP
592}
593
3c8def97 594static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 596{
3c8def97
SP
597 struct be_tx_stats *stats = tx_stats(txo);
598
ab1594e9 599 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 604 if (stopped)
ac124ff9 605 stats->tx_stops++;
ab1594e9 606 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
6b7c5b94 612{
ebc8d2ab
DM
613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
6b7c5b94
SP
617 /* to account for hdr wrb */
618 cnt++;
fe6d2a38
SP
619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
6b7c5b94
SP
622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
fe6d2a38 625 }
6b7c5b94
SP
626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 635 wrb->rsvd0 = 0;
6b7c5b94
SP
636}
637
1ded132d
AK
638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
cc4ce020 654static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
bc0c3405 655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
6b7c5b94 656{
1ded132d 657 u16 vlan_tag;
cc4ce020 658
6b7c5b94
SP
659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
49e4b847 663 if (skb_is_gso(skb)) {
6b7c5b94
SP
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94
SP
669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
4c5102f9 676 if (vlan_tx_tag_present(skb)) {
6b7c5b94 677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
680 }
681
bc0c3405
AK
682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687}
688
2b7bcebf 689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 697 if (wrb->frag_len) {
7101e111 698 if (unmap_single)
2b7bcebf
IV
699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
7101e111 701 else
2b7bcebf 702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
703 }
704}
6b7c5b94 705
3c8def97 706static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
bc0c3405
AK
707 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708 bool skip_hw_vlan)
6b7c5b94 709{
7101e111
SP
710 dma_addr_t busaddr;
711 int i, copied = 0;
2b7bcebf 712 struct device *dev = &adapter->pdev->dev;
6b7c5b94 713 struct sk_buff *first_skb = skb;
6b7c5b94
SP
714 struct be_eth_wrb *wrb;
715 struct be_eth_hdr_wrb *hdr;
7101e111
SP
716 bool map_single = false;
717 u16 map_head;
6b7c5b94 718
6b7c5b94
SP
719 hdr = queue_head_node(txq);
720 queue_head_inc(txq);
7101e111 721 map_head = txq->head;
6b7c5b94 722
ebc8d2ab 723 if (skb->len > skb->data_len) {
e743d313 724 int len = skb_headlen(skb);
2b7bcebf
IV
725 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726 if (dma_mapping_error(dev, busaddr))
7101e111
SP
727 goto dma_err;
728 map_single = true;
ebc8d2ab
DM
729 wrb = queue_head_node(txq);
730 wrb_fill(wrb, busaddr, len);
731 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732 queue_head_inc(txq);
733 copied += len;
734 }
6b7c5b94 735
ebc8d2ab 736 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9e903e08 737 const struct skb_frag_struct *frag =
ebc8d2ab 738 &skb_shinfo(skb)->frags[i];
b061b39e 739 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 740 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 741 if (dma_mapping_error(dev, busaddr))
7101e111 742 goto dma_err;
ebc8d2ab 743 wrb = queue_head_node(txq);
9e903e08 744 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
745 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746 queue_head_inc(txq);
9e903e08 747 copied += skb_frag_size(frag);
6b7c5b94
SP
748 }
749
750 if (dummy_wrb) {
751 wrb = queue_head_node(txq);
752 wrb_fill(wrb, 0, 0);
753 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754 queue_head_inc(txq);
755 }
756
bc0c3405 757 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
758 be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760 return copied;
7101e111
SP
761dma_err:
762 txq->head = map_head;
763 while (copied) {
764 wrb = queue_head_node(txq);
2b7bcebf 765 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
766 map_single = false;
767 copied -= wrb->frag_len;
768 queue_head_inc(txq);
769 }
770 return 0;
6b7c5b94
SP
771}
772
93040ae5 773static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405
AK
774 struct sk_buff *skb,
775 bool *skip_hw_vlan)
93040ae5
SK
776{
777 u16 vlan_tag = 0;
778
779 skb = skb_share_check(skb, GFP_ATOMIC);
780 if (unlikely(!skb))
781 return skb;
782
efee8e87 783 if (vlan_tx_tag_present(skb))
93040ae5 784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
efee8e87
SB
785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
786 vlan_tag = adapter->pvid;
bc0c3405
AK
787
788 if (vlan_tag) {
58717686 789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
790 if (unlikely(!skb))
791 return skb;
bc0c3405 792 skb->vlan_tci = 0;
efee8e87
SB
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
bc0c3405
AK
795 }
796
797 /* Insert the outer VLAN, if any */
798 if (adapter->qnq_vid) {
799 vlan_tag = adapter->qnq_vid;
58717686 800 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
801 if (unlikely(!skb))
802 return skb;
803 if (skip_hw_vlan)
804 *skip_hw_vlan = true;
805 }
806
93040ae5
SK
807 return skb;
808}
809
bc0c3405
AK
810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
ee9c799c
SP
/* BE3-only: does this skb match the IPv6 ext-header TX-stall erratum? */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
842
ee9c799c
SP
843static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
844 struct sk_buff *skb,
845 bool *skip_hw_vlan)
6b7c5b94 846{
d2cb6ce7 847 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
ee9c799c
SP
848 unsigned int eth_hdr_len;
849 struct iphdr *ip;
93040ae5 850
48265667
SK
851 /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
852 * may cause a transmit stall on that port. So the work-around is to
853 * pad such packets to a 36-byte length.
854 */
855 if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
856 if (skb_padto(skb, 36))
857 goto tx_drop;
858 skb->len = 36;
859 }
860
1297f9db
AK
861 /* For padded packets, BE HW modifies tot_len field in IP header
862 * incorrecly when VLAN tag is inserted by HW.
3904dcc4 863 * For padded packets, Lancer computes incorrect checksum.
1ded132d 864 */
ee9c799c
SP
865 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
866 VLAN_ETH_HLEN : ETH_HLEN;
3904dcc4
SK
867 if (skb->len <= 60 &&
868 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
ee9c799c 869 is_ipv4_pkt(skb)) {
93040ae5
SK
870 ip = (struct iphdr *)ip_hdr(skb);
871 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
872 }
1ded132d 873
d2cb6ce7
AK
874 /* If vlan tag is already inlined in the packet, skip HW VLAN
875 * tagging in UMC mode
876 */
877 if ((adapter->function_mode & UMC_ENABLED) &&
878 veh->h_vlan_proto == htons(ETH_P_8021Q))
ee9c799c 879 *skip_hw_vlan = true;
d2cb6ce7 880
93040ae5
SK
881 /* HW has a bug wherein it will calculate CSUM for VLAN
882 * pkts even though it is disabled.
883 * Manually insert VLAN in pkt.
884 */
885 if (skb->ip_summed != CHECKSUM_PARTIAL &&
ee9c799c
SP
886 vlan_tx_tag_present(skb)) {
887 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
bc0c3405
AK
888 if (unlikely(!skb))
889 goto tx_drop;
890 }
891
892 /* HW may lockup when VLAN HW tagging is requested on
893 * certain ipv6 packets. Drop such pkts if the HW workaround to
894 * skip HW tagging is not enabled by FW.
895 */
896 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
ee9c799c
SP
897 (adapter->pvid || adapter->qnq_vid) &&
898 !qnq_async_evt_rcvd(adapter)))
bc0c3405
AK
899 goto tx_drop;
900
901 /* Manual VLAN tag insertion to prevent:
902 * ASIC lockup when the ASIC inserts VLAN tag into
903 * certain ipv6 packets. Insert VLAN tags in driver,
904 * and set event, completion, vlan bits accordingly
905 * in the Tx WRB.
906 */
907 if (be_ipv6_tx_stall_chk(adapter, skb) &&
908 be_vlan_tag_tx_chk(adapter, skb)) {
ee9c799c 909 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
1ded132d
AK
910 if (unlikely(!skb))
911 goto tx_drop;
1ded132d
AK
912 }
913
ee9c799c
SP
914 return skb;
915tx_drop:
916 dev_kfree_skb_any(skb);
917 return NULL;
918}
919
920static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
921{
922 struct be_adapter *adapter = netdev_priv(netdev);
923 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
924 struct be_queue_info *txq = &txo->q;
925 bool dummy_wrb, stopped = false;
926 u32 wrb_cnt = 0, copied = 0;
927 bool skip_hw_vlan = false;
928 u32 start = txq->head;
929
930 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
931 if (!skb)
932 return NETDEV_TX_OK;
933
fe6d2a38 934 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 935
bc0c3405
AK
936 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
937 skip_hw_vlan);
c190e3c8 938 if (copied) {
cd8f76c0
ED
939 int gso_segs = skb_shinfo(skb)->gso_segs;
940
c190e3c8 941 /* record the sent skb in the sent_skb table */
3c8def97
SP
942 BUG_ON(txo->sent_skb_list[start]);
943 txo->sent_skb_list[start] = skb;
c190e3c8
AK
944
945 /* Ensure txq has space for the next skb; Else stop the queue
946 * *BEFORE* ringing the tx doorbell, so that we serialze the
947 * tx compls of the current transmit which'll wake up the queue
948 */
7101e111 949 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
950 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
951 txq->len) {
3c8def97 952 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
953 stopped = true;
954 }
6b7c5b94 955
94d73aaa 956 be_txq_notify(adapter, txo, wrb_cnt);
6b7c5b94 957
cd8f76c0 958 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
c190e3c8
AK
959 } else {
960 txq->head = start;
961 dev_kfree_skb_any(skb);
6b7c5b94 962 }
6b7c5b94
SP
963 return NETDEV_TX_OK;
964}
965
966static int be_change_mtu(struct net_device *netdev, int new_mtu)
967{
968 struct be_adapter *adapter = netdev_priv(netdev);
969 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
970 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
971 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
972 dev_info(&adapter->pdev->dev,
973 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
974 BE_MIN_MTU,
975 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
976 return -EINVAL;
977 }
978 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
979 netdev->mtu, new_mtu);
980 netdev->mtu = new_mtu;
981 return 0;
982}
983
984/*
82903e4b
AK
985 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
986 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 987 */
10329df8 988static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 989{
10329df8
SP
990 u16 vids[BE_NUM_VLANS_SUPPORTED];
991 u16 num = 0, i;
82903e4b 992 int status = 0;
1da87b7f 993
c0e64ef4
SP
994 /* No need to further configure vids if in promiscuous mode */
995 if (adapter->promiscuous)
996 return 0;
997
0fc16ebf
PR
998 if (adapter->vlans_added > adapter->max_vlans)
999 goto set_vlan_promisc;
1000
1001 /* Construct VLAN Table to give to HW */
1002 for (i = 0; i < VLAN_N_VID; i++)
1003 if (adapter->vlan_tag[i])
10329df8 1004 vids[num++] = cpu_to_le16(i);
0fc16ebf
PR
1005
1006 status = be_cmd_vlan_config(adapter, adapter->if_handle,
10329df8 1007 vids, num, 1, 0);
0fc16ebf
PR
1008
1009 /* Set to VLAN promisc mode as setting VLAN filter failed */
1010 if (status) {
1011 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1012 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1013 goto set_vlan_promisc;
6b7c5b94 1014 }
1da87b7f 1015
b31c50a7 1016 return status;
0fc16ebf
PR
1017
1018set_vlan_promisc:
1019 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1020 NULL, 0, 1, 1);
1021 return status;
6b7c5b94
SP
1022}
1023
80d5c368 1024static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1025{
1026 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1027 int status = 0;
6b7c5b94 1028
a85e9986 1029 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
1030 status = -EINVAL;
1031 goto ret;
1032 }
ba343c77 1033
a85e9986
PR
1034 /* Packets with VID 0 are always received by Lancer by default */
1035 if (lancer_chip(adapter) && vid == 0)
1036 goto ret;
1037
6b7c5b94 1038 adapter->vlan_tag[vid] = 1;
82903e4b 1039 if (adapter->vlans_added <= (adapter->max_vlans + 1))
10329df8 1040 status = be_vid_config(adapter);
8e586137 1041
80817cbf
AK
1042 if (!status)
1043 adapter->vlans_added++;
1044 else
1045 adapter->vlan_tag[vid] = 0;
1046ret:
1047 return status;
6b7c5b94
SP
1048}
1049
80d5c368 1050static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1051{
1052 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1053 int status = 0;
6b7c5b94 1054
a85e9986 1055 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
1056 status = -EINVAL;
1057 goto ret;
1058 }
ba343c77 1059
a85e9986
PR
1060 /* Packets with VID 0 are always received by Lancer by default */
1061 if (lancer_chip(adapter) && vid == 0)
1062 goto ret;
1063
6b7c5b94 1064 adapter->vlan_tag[vid] = 0;
82903e4b 1065 if (adapter->vlans_added <= adapter->max_vlans)
10329df8 1066 status = be_vid_config(adapter);
8e586137 1067
80817cbf
AK
1068 if (!status)
1069 adapter->vlans_added--;
1070 else
1071 adapter->vlan_tag[vid] = 1;
1072ret:
1073 return status;
6b7c5b94
SP
1074}
1075
a54769f5 1076static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
1077{
1078 struct be_adapter *adapter = netdev_priv(netdev);
0fc16ebf 1079 int status;
6b7c5b94 1080
24307eef 1081 if (netdev->flags & IFF_PROMISC) {
5b8821b7 1082 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
1083 adapter->promiscuous = true;
1084 goto done;
6b7c5b94
SP
1085 }
1086
25985edc 1087 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
1088 if (adapter->promiscuous) {
1089 adapter->promiscuous = false;
5b8821b7 1090 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
1091
1092 if (adapter->vlans_added)
10329df8 1093 be_vid_config(adapter);
6b7c5b94
SP
1094 }
1095
e7b909a6 1096 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 1097 if (netdev->flags & IFF_ALLMULTI ||
abb93951 1098 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
5b8821b7 1099 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 1100 goto done;
6b7c5b94 1101 }
6b7c5b94 1102
fbc13f01
AK
1103 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1104 struct netdev_hw_addr *ha;
1105 int i = 1; /* First slot is claimed by the Primary MAC */
1106
1107 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1108 be_cmd_pmac_del(adapter, adapter->if_handle,
1109 adapter->pmac_id[i], 0);
1110 }
1111
1112 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1113 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1114 adapter->promiscuous = true;
1115 goto done;
1116 }
1117
1118 netdev_for_each_uc_addr(ha, adapter->netdev) {
1119 adapter->uc_macs++; /* First slot is for Primary MAC */
1120 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1121 adapter->if_handle,
1122 &adapter->pmac_id[adapter->uc_macs], 0);
1123 }
1124 }
1125
0fc16ebf
PR
1126 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1127
1128 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1129 if (status) {
1130 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1131 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1132 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1133 }
24307eef
SP
1134done:
1135 return;
6b7c5b94
SP
1136}
1137
ba343c77
SB
1138static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1139{
1140 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1141 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77 1142 int status;
704e4c88
PR
1143 bool active_mac = false;
1144 u32 pmac_id;
1145 u8 old_mac[ETH_ALEN];
ba343c77 1146
11ac75ed 1147 if (!sriov_enabled(adapter))
ba343c77
SB
1148 return -EPERM;
1149
11ac75ed 1150 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1151 return -EINVAL;
1152
590c391d 1153 if (lancer_chip(adapter)) {
704e4c88
PR
1154 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1155 &pmac_id, vf + 1);
1156 if (!status && active_mac)
1157 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1158 pmac_id, vf + 1);
1159
590c391d
PR
1160 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1161 } else {
11ac75ed
SP
1162 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1163 vf_cfg->pmac_id, vf + 1);
ba343c77 1164
11ac75ed
SP
1165 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1166 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
1167 }
1168
64600ea5 1169 if (status)
ba343c77
SB
1170 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1171 mac, vf);
64600ea5 1172 else
11ac75ed 1173 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1174
ba343c77
SB
1175 return status;
1176}
1177
64600ea5
AK
1178static int be_get_vf_config(struct net_device *netdev, int vf,
1179 struct ifla_vf_info *vi)
1180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1182 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1183
11ac75ed 1184 if (!sriov_enabled(adapter))
64600ea5
AK
1185 return -EPERM;
1186
11ac75ed 1187 if (vf >= adapter->num_vfs)
64600ea5
AK
1188 return -EINVAL;
1189
1190 vi->vf = vf;
11ac75ed
SP
1191 vi->tx_rate = vf_cfg->tx_rate;
1192 vi->vlan = vf_cfg->vlan_tag;
64600ea5 1193 vi->qos = 0;
11ac75ed 1194 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1195
1196 return 0;
1197}
1198
1da87b7f
AK
1199static int be_set_vf_vlan(struct net_device *netdev,
1200 int vf, u16 vlan, u8 qos)
1201{
1202 struct be_adapter *adapter = netdev_priv(netdev);
1203 int status = 0;
1204
11ac75ed 1205 if (!sriov_enabled(adapter))
1da87b7f
AK
1206 return -EPERM;
1207
11ac75ed 1208 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
1209 return -EINVAL;
1210
1211 if (vlan) {
f1f3ee1b
AK
1212 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1213 /* If this is new value, program it. Else skip. */
1214 adapter->vf_cfg[vf].vlan_tag = vlan;
1215
1216 status = be_cmd_set_hsw_config(adapter, vlan,
1217 vf + 1, adapter->vf_cfg[vf].if_handle);
1218 }
1da87b7f 1219 } else {
f1f3ee1b 1220 /* Reset Transparent Vlan Tagging. */
11ac75ed 1221 adapter->vf_cfg[vf].vlan_tag = 0;
f1f3ee1b
AK
1222 vlan = adapter->vf_cfg[vf].def_vid;
1223 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1224 adapter->vf_cfg[vf].if_handle);
1da87b7f
AK
1225 }
1226
1da87b7f
AK
1227
1228 if (status)
1229 dev_info(&adapter->pdev->dev,
1230 "VLAN %d config on VF %d failed\n", vlan, vf);
1231 return status;
1232}
1233
e1d18735
AK
1234static int be_set_vf_tx_rate(struct net_device *netdev,
1235 int vf, int rate)
1236{
1237 struct be_adapter *adapter = netdev_priv(netdev);
1238 int status = 0;
1239
11ac75ed 1240 if (!sriov_enabled(adapter))
e1d18735
AK
1241 return -EPERM;
1242
94f434c2 1243 if (vf >= adapter->num_vfs)
e1d18735
AK
1244 return -EINVAL;
1245
94f434c2
AK
1246 if (rate < 100 || rate > 10000) {
1247 dev_err(&adapter->pdev->dev,
1248 "tx rate must be between 100 and 10000 Mbps\n");
1249 return -EINVAL;
1250 }
e1d18735 1251
d5c18473
PR
1252 if (lancer_chip(adapter))
1253 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1254 else
1255 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1256
1257 if (status)
94f434c2 1258 dev_err(&adapter->pdev->dev,
e1d18735 1259 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1260 else
1261 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1262 return status;
1263}
1264
39f1d94d
SP
1265static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1266{
1267 struct pci_dev *dev, *pdev = adapter->pdev;
2f6a0260 1268 int vfs = 0, assigned_vfs = 0, pos;
39f1d94d
SP
1269 u16 offset, stride;
1270
1271 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
d79c0a20
SP
1272 if (!pos)
1273 return 0;
39f1d94d
SP
1274 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1275 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1276
1277 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1278 while (dev) {
2f6a0260 1279 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
39f1d94d
SP
1280 vfs++;
1281 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1282 assigned_vfs++;
1283 }
1284 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1285 }
1286 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1287}
1288
10ef9ab4 1289static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 1290{
10ef9ab4 1291 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
4097f663 1292 ulong now = jiffies;
ac124ff9 1293 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
1294 u64 pkts;
1295 unsigned int start, eqd;
ac124ff9 1296
10ef9ab4
SP
1297 if (!eqo->enable_aic) {
1298 eqd = eqo->eqd;
1299 goto modify_eqd;
1300 }
1301
1302 if (eqo->idx >= adapter->num_rx_qs)
ac124ff9 1303 return;
6b7c5b94 1304
10ef9ab4
SP
1305 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1306
4097f663 1307 /* Wrapped around */
3abcdeda
SP
1308 if (time_before(now, stats->rx_jiffies)) {
1309 stats->rx_jiffies = now;
4097f663
SP
1310 return;
1311 }
6b7c5b94 1312
ac124ff9
SP
1313 /* Update once a second */
1314 if (delta < HZ)
6b7c5b94
SP
1315 return;
1316
ab1594e9
SP
1317 do {
1318 start = u64_stats_fetch_begin_bh(&stats->sync);
1319 pkts = stats->rx_pkts;
1320 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1321
68c3e5a7 1322 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1323 stats->rx_pkts_prev = pkts;
3abcdeda 1324 stats->rx_jiffies = now;
10ef9ab4
SP
1325 eqd = (stats->rx_pps / 110000) << 3;
1326 eqd = min(eqd, eqo->max_eqd);
1327 eqd = max(eqd, eqo->min_eqd);
ac124ff9
SP
1328 if (eqd < 10)
1329 eqd = 0;
10ef9ab4
SP
1330
1331modify_eqd:
1332 if (eqd != eqo->cur_eqd) {
1333 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1334 eqo->cur_eqd = eqd;
ac124ff9 1335 }
6b7c5b94
SP
1336}
1337
3abcdeda 1338static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1339 struct be_rx_compl_info *rxcp)
4097f663 1340{
ac124ff9 1341 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1342
ab1594e9 1343 u64_stats_update_begin(&stats->sync);
3abcdeda 1344 stats->rx_compl++;
2e588f84 1345 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1346 stats->rx_pkts++;
2e588f84 1347 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1348 stats->rx_mcast_pkts++;
2e588f84 1349 if (rxcp->err)
ac124ff9 1350 stats->rx_compl_err++;
ab1594e9 1351 u64_stats_update_end(&stats->sync);
4097f663
SP
1352}
1353
2e588f84 1354static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1355{
19fad86f
PR
1356 /* L4 checksum is not reliable for non TCP/UDP packets.
1357 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1358 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1359 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1360}
1361
10ef9ab4
SP
1362static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1363 u16 frag_idx)
6b7c5b94 1364{
10ef9ab4 1365 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1366 struct be_rx_page_info *rx_page_info;
3abcdeda 1367 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1368
3abcdeda 1369 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1370 BUG_ON(!rx_page_info->page);
1371
205859a2 1372 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1373 dma_unmap_page(&adapter->pdev->dev,
1374 dma_unmap_addr(rx_page_info, bus),
1375 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1376 rx_page_info->last_page_user = false;
1377 }
6b7c5b94
SP
1378
1379 atomic_dec(&rxq->used);
1380 return rx_page_info;
1381}
1382
1383/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1384static void be_rx_compl_discard(struct be_rx_obj *rxo,
1385 struct be_rx_compl_info *rxcp)
6b7c5b94 1386{
3abcdeda 1387 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1388 struct be_rx_page_info *page_info;
2e588f84 1389 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1390
e80d9da6 1391 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1392 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1393 put_page(page_info->page);
1394 memset(page_info, 0, sizeof(*page_info));
2e588f84 1395 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1396 }
1397}
1398
1399/*
1400 * skb_fill_rx_data forms a complete skb for an ether frame
1401 * indicated by rxcp.
1402 */
10ef9ab4
SP
1403static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1404 struct be_rx_compl_info *rxcp)
6b7c5b94 1405{
3abcdeda 1406 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1407 struct be_rx_page_info *page_info;
2e588f84
SP
1408 u16 i, j;
1409 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1410 u8 *start;
6b7c5b94 1411
10ef9ab4 1412 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1413 start = page_address(page_info->page) + page_info->page_offset;
1414 prefetch(start);
1415
1416 /* Copy data in the first descriptor of this completion */
2e588f84 1417 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1418
6b7c5b94
SP
1419 skb->len = curr_frag_len;
1420 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1421 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1422 /* Complete packet has now been moved to data */
1423 put_page(page_info->page);
1424 skb->data_len = 0;
1425 skb->tail += curr_frag_len;
1426 } else {
ac1ae5f3
ED
1427 hdr_len = ETH_HLEN;
1428 memcpy(skb->data, start, hdr_len);
6b7c5b94 1429 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1430 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1431 skb_shinfo(skb)->frags[0].page_offset =
1432 page_info->page_offset + hdr_len;
9e903e08 1433 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1434 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1435 skb->truesize += rx_frag_size;
6b7c5b94
SP
1436 skb->tail += hdr_len;
1437 }
205859a2 1438 page_info->page = NULL;
6b7c5b94 1439
2e588f84
SP
1440 if (rxcp->pkt_size <= rx_frag_size) {
1441 BUG_ON(rxcp->num_rcvd != 1);
1442 return;
6b7c5b94
SP
1443 }
1444
1445 /* More frags present for this completion */
2e588f84
SP
1446 index_inc(&rxcp->rxq_idx, rxq->len);
1447 remaining = rxcp->pkt_size - curr_frag_len;
1448 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
10ef9ab4 1449 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
2e588f84 1450 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1451
bd46cb6c
AK
1452 /* Coalesce all frags from the same physical page in one slot */
1453 if (page_info->page_offset == 0) {
1454 /* Fresh page */
1455 j++;
b061b39e 1456 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1457 skb_shinfo(skb)->frags[j].page_offset =
1458 page_info->page_offset;
9e903e08 1459 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1460 skb_shinfo(skb)->nr_frags++;
1461 } else {
1462 put_page(page_info->page);
1463 }
1464
9e903e08 1465 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1466 skb->len += curr_frag_len;
1467 skb->data_len += curr_frag_len;
bdb28a97 1468 skb->truesize += rx_frag_size;
2e588f84
SP
1469 remaining -= curr_frag_len;
1470 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1471 page_info->page = NULL;
6b7c5b94 1472 }
bd46cb6c 1473 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1474}
1475
5be93b9a 1476/* Process the RX completion indicated by rxcp when GRO is disabled */
10ef9ab4
SP
1477static void be_rx_compl_process(struct be_rx_obj *rxo,
1478 struct be_rx_compl_info *rxcp)
6b7c5b94 1479{
10ef9ab4 1480 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1481 struct net_device *netdev = adapter->netdev;
6b7c5b94 1482 struct sk_buff *skb;
89420424 1483
bb349bb4 1484 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1485 if (unlikely(!skb)) {
ac124ff9 1486 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1487 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1488 return;
1489 }
1490
10ef9ab4 1491 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1492
6332c8d3 1493 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1494 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1495 else
1496 skb_checksum_none_assert(skb);
6b7c5b94 1497
6332c8d3 1498 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1499 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1500 if (netdev->features & NETIF_F_RXHASH)
4b972914
AK
1501 skb->rxhash = rxcp->rss_hash;
1502
6b7c5b94 1503
343e43c0 1504 if (rxcp->vlanf)
86a9bad3 1505 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1506
1507 netif_receive_skb(skb);
6b7c5b94
SP
1508}
1509
5be93b9a 1510/* Process the RX completion indicated by rxcp when GRO is enabled */
10ef9ab4
SP
1511void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1512 struct be_rx_compl_info *rxcp)
6b7c5b94 1513{
10ef9ab4 1514 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1515 struct be_rx_page_info *page_info;
5be93b9a 1516 struct sk_buff *skb = NULL;
3abcdeda 1517 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1518 u16 remaining, curr_frag_len;
1519 u16 i, j;
3968fa1e 1520
10ef9ab4 1521 skb = napi_get_frags(napi);
5be93b9a 1522 if (!skb) {
10ef9ab4 1523 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1524 return;
1525 }
1526
2e588f84
SP
1527 remaining = rxcp->pkt_size;
1528 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1529 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1530
1531 curr_frag_len = min(remaining, rx_frag_size);
1532
bd46cb6c
AK
1533 /* Coalesce all frags from the same physical page in one slot */
1534 if (i == 0 || page_info->page_offset == 0) {
1535 /* First frag or Fresh page */
1536 j++;
b061b39e 1537 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1538 skb_shinfo(skb)->frags[j].page_offset =
1539 page_info->page_offset;
9e903e08 1540 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1541 } else {
1542 put_page(page_info->page);
1543 }
9e903e08 1544 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1545 skb->truesize += rx_frag_size;
bd46cb6c 1546 remaining -= curr_frag_len;
2e588f84 1547 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1548 memset(page_info, 0, sizeof(*page_info));
1549 }
bd46cb6c 1550 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1551
5be93b9a 1552 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1553 skb->len = rxcp->pkt_size;
1554 skb->data_len = rxcp->pkt_size;
5be93b9a 1555 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1556 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914
AK
1557 if (adapter->netdev->features & NETIF_F_RXHASH)
1558 skb->rxhash = rxcp->rss_hash;
5be93b9a 1559
343e43c0 1560 if (rxcp->vlanf)
86a9bad3 1561 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1562
10ef9ab4 1563 napi_gro_frags(napi);
2e588f84
SP
1564}
1565
10ef9ab4
SP
1566static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1567 struct be_rx_compl_info *rxcp)
2e588f84
SP
1568{
1569 rxcp->pkt_size =
1570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1571 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1572 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1573 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1574 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1575 rxcp->ip_csum =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1577 rxcp->l4_csum =
1578 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1579 rxcp->ipv6 =
1580 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1581 rxcp->rxq_idx =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1583 rxcp->num_rcvd =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1585 rxcp->pkt_type =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1587 rxcp->rss_hash =
c297977e 1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1589 if (rxcp->vlanf) {
1590 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1591 compl);
1592 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1593 compl);
15d72184 1594 }
12004ae9 1595 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1596}
1597
10ef9ab4
SP
1598static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1599 struct be_rx_compl_info *rxcp)
2e588f84
SP
1600{
1601 rxcp->pkt_size =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1603 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1604 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1605 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1606 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1607 rxcp->ip_csum =
1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1609 rxcp->l4_csum =
1610 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1611 rxcp->ipv6 =
1612 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1613 rxcp->rxq_idx =
1614 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1615 rxcp->num_rcvd =
1616 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1617 rxcp->pkt_type =
1618 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1619 rxcp->rss_hash =
c297977e 1620 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1621 if (rxcp->vlanf) {
1622 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1623 compl);
1624 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1625 compl);
15d72184 1626 }
12004ae9 1627 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1628}
1629
1630static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1631{
1632 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1633 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1634 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1635
2e588f84
SP
1636 /* For checking the valid bit it is Ok to use either definition as the
1637 * valid bit is at the same position in both v0 and v1 Rx compl */
1638 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1639 return NULL;
6b7c5b94 1640
2e588f84
SP
1641 rmb();
1642 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1643
2e588f84 1644 if (adapter->be3_native)
10ef9ab4 1645 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1646 else
10ef9ab4 1647 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1648
15d72184
SP
1649 if (rxcp->vlanf) {
1650 /* vlanf could be wrongly set in some cards.
1651 * ignore if vtm is not set */
752961a1 1652 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1653 rxcp->vlanf = 0;
6b7c5b94 1654
15d72184 1655 if (!lancer_chip(adapter))
3c709f8f 1656 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1657
939cf306 1658 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1659 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1660 rxcp->vlanf = 0;
1661 }
2e588f84
SP
1662
1663 /* As the compl has been parsed, reset it; we wont touch it again */
1664 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1665
3abcdeda 1666 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1667 return rxcp;
1668}
1669
1829b086 1670static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1671{
6b7c5b94 1672 u32 order = get_order(size);
1829b086 1673
6b7c5b94 1674 if (order > 0)
1829b086
ED
1675 gfp |= __GFP_COMP;
1676 return alloc_pages(gfp, order);
6b7c5b94
SP
1677}
1678
1679/*
1680 * Allocate a page, split it to fragments of size rx_frag_size and post as
1681 * receive buffers to BE
1682 */
1829b086 1683static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1684{
3abcdeda 1685 struct be_adapter *adapter = rxo->adapter;
26d92f92 1686 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1687 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1688 struct page *pagep = NULL;
1689 struct be_eth_rx_d *rxd;
1690 u64 page_dmaaddr = 0, frag_dmaaddr;
1691 u32 posted, page_offset = 0;
1692
3abcdeda 1693 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1694 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1695 if (!pagep) {
1829b086 1696 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1697 if (unlikely(!pagep)) {
ac124ff9 1698 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1699 break;
1700 }
2b7bcebf
IV
1701 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1702 0, adapter->big_page_size,
1703 DMA_FROM_DEVICE);
6b7c5b94
SP
1704 page_info->page_offset = 0;
1705 } else {
1706 get_page(pagep);
1707 page_info->page_offset = page_offset + rx_frag_size;
1708 }
1709 page_offset = page_info->page_offset;
1710 page_info->page = pagep;
fac6da5b 1711 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1712 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1713
1714 rxd = queue_head_node(rxq);
1715 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1716 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1717
1718 /* Any space left in the current big page for another frag? */
1719 if ((page_offset + rx_frag_size + rx_frag_size) >
1720 adapter->big_page_size) {
1721 pagep = NULL;
1722 page_info->last_page_user = true;
1723 }
26d92f92
SP
1724
1725 prev_page_info = page_info;
1726 queue_head_inc(rxq);
10ef9ab4 1727 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1728 }
1729 if (pagep)
26d92f92 1730 prev_page_info->last_page_user = true;
6b7c5b94
SP
1731
1732 if (posted) {
6b7c5b94 1733 atomic_add(posted, &rxq->used);
8788fdc2 1734 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1735 } else if (atomic_read(&rxq->used) == 0) {
1736 /* Let be_worker replenish when memory is available */
3abcdeda 1737 rxo->rx_post_starved = true;
6b7c5b94 1738 }
6b7c5b94
SP
1739}
1740
5fb379ee 1741static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1742{
6b7c5b94
SP
1743 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1744
1745 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1746 return NULL;
1747
f3eb62d2 1748 rmb();
6b7c5b94
SP
1749 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1750
1751 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1752
1753 queue_tail_inc(tx_cq);
1754 return txcp;
1755}
1756
3c8def97
SP
1757static u16 be_tx_compl_process(struct be_adapter *adapter,
1758 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1759{
3c8def97 1760 struct be_queue_info *txq = &txo->q;
a73b796e 1761 struct be_eth_wrb *wrb;
3c8def97 1762 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1763 struct sk_buff *sent_skb;
ec43b1a6
SP
1764 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1765 bool unmap_skb_hdr = true;
6b7c5b94 1766
ec43b1a6 1767 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1768 BUG_ON(!sent_skb);
ec43b1a6
SP
1769 sent_skbs[txq->tail] = NULL;
1770
1771 /* skip header wrb */
a73b796e 1772 queue_tail_inc(txq);
6b7c5b94 1773
ec43b1a6 1774 do {
6b7c5b94 1775 cur_index = txq->tail;
a73b796e 1776 wrb = queue_tail_node(txq);
2b7bcebf
IV
1777 unmap_tx_frag(&adapter->pdev->dev, wrb,
1778 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1779 unmap_skb_hdr = false;
1780
6b7c5b94
SP
1781 num_wrbs++;
1782 queue_tail_inc(txq);
ec43b1a6 1783 } while (cur_index != last_index);
6b7c5b94 1784
6b7c5b94 1785 kfree_skb(sent_skb);
4d586b82 1786 return num_wrbs;
6b7c5b94
SP
1787}
1788
10ef9ab4
SP
1789/* Return the number of events in the event queue */
1790static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1791{
10ef9ab4
SP
1792 struct be_eq_entry *eqe;
1793 int num = 0;
859b1e4e 1794
10ef9ab4
SP
1795 do {
1796 eqe = queue_tail_node(&eqo->q);
1797 if (eqe->evt == 0)
1798 break;
859b1e4e 1799
10ef9ab4
SP
1800 rmb();
1801 eqe->evt = 0;
1802 num++;
1803 queue_tail_inc(&eqo->q);
1804 } while (true);
1805
1806 return num;
859b1e4e
SP
1807}
1808
10ef9ab4
SP
1809/* Leaves the EQ is disarmed state */
1810static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1811{
10ef9ab4 1812 int num = events_get(eqo);
859b1e4e 1813
10ef9ab4 1814 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1815}
1816
/* Drain the Rx completion queue and release every posted Rx buffer.
 * Waits up to ~10ms for the flush completion (num_rcvd == 0); Lancer
 * posts no flush compl, so an empty CQ ends the loop there. Leaves the
 * CQ unarmed and the RXQ empty with head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10 iterations or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* NOTE(review): assumes get_rx_page_info() decrements
		 * rxq->used - confirmed by the BUG_ON below, not visible here
		 */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1867
/* Reap all outstanding Tx completions on every Tx queue so that all
 * in-flight skbs are unmapped and freed. Polls for up to ~200ms; wrbs
 * whose completions never arrive are then freed forcibly so the queues
 * end up empty.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the compls without re-arming the CQ */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the wrb span of this skb so
			 * be_tx_compl_process() knows where it ends
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1926
10ef9ab4
SP
1927static void be_evt_queues_destroy(struct be_adapter *adapter)
1928{
1929 struct be_eq_obj *eqo;
1930 int i;
1931
1932 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1933 if (eqo->q.created) {
1934 be_eq_clean(eqo);
10ef9ab4 1935 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1936 }
10ef9ab4
SP
1937 be_queue_free(adapter, &eqo->q);
1938 }
1939}
1940
/* Allocate and create one event queue per IRQ vector.
 * Queues created before a failure are left for be_evt_queues_destroy()
 * to clean up. Returns 0 on success, non-zero otherwise.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true; /* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1968
/* Destroy the MCC (mailbox command) queue and then its completion
 * queue, freeing both. Safe if either queue was never created.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1983
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue on the default EQ, then the MCC
 * queue itself. On any failure the goto chain unwinds whatever was
 * set up; returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2016
/* Destroy every Tx queue and its companion completion queue, freeing
 * both. Safe for queues that were never created.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2035
/* Number of Tx queues to create. A single TXQ is used when SR-IOV is
 * wanted on a non-Lancer chip, in multi-channel mode, on a non-Lancer
 * virtual function, or on BE2 silicon; otherwise the function's
 * maximum is used.
 */
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
	    be_is_mc(adapter) ||
	    (!lancer_chip(adapter) && !be_physfn(adapter)) ||
	    BE2_chip(adapter))
		return 1;
	else
		return adapter->max_tx_queues;
}
2046
/* Decide the Tx queue count, publish it to the net stack, and allocate
 * plus create a completion queue for each Tx queue. Partially created
 * CQs are left for be_tx_queues_destroy() to clean up on failure.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
6b7c5b94 2079
/* Allocate and create the Tx work queues, one per Tx object.
 * Must run after be_tx_cqs_create(); returns 0 on success.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2100
/* Destroy and free every Rx completion queue. */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2114
/* Decide the Rx queue count (RSS rings + 1 default queue when multiple
 * irqs exist, otherwise 1), publish it to the net stack, and allocate
 * plus create a completion queue per Rx queue.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2153
/* INTx interrupt handler (only the first EQ is used in INTx mode).
 * Schedules NAPI and acks the counted events; reports IRQ_NONE only
 * after repeated spurious interrupts so a shared line is not disabled
 * prematurely.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2185
/* MSI-x interrupt handler: disarm the EQ and hand all processing to
 * NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2194
2e588f84 2195static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2196{
2e588f84 2197 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2198}
2199
/* NAPI Rx handler for one Rx ring: consume up to @budget completions,
 * discarding flush/partial/mis-filtered ones and handing the rest to
 * GRO or the regular receive path. Re-arms the CQ and replenishes the
 * Rx ring when it runs low. Returns the number of completions seen.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2249
/* Reap up to @budget Tx completions from txo's CQ: free the completed
 * skbs, credit the wrbs back to the queue, and wake the corresponding
 * netdev subqueue if it had been flow-stopped.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2282
/* NAPI poll handler for one EQ: services all Tx queues and Rx queues
 * mapped to this EQ (queue i belongs to EQ i % num_evt_qs), plus MCC
 * completions when this is the MCC EQ. The EQ is re-armed only when
 * all work fit within the budget; otherwise events are acked but the
 * EQ stays disarmed so polling continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget; /* tx not drained: force re-poll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2321
/* Check the adapter for unrecoverable hardware errors.
 * Lancer reports errors via the SLIPORT status register; other chips
 * via the UE (unrecoverable error) status words in PCI config space.
 * Only SLIPORT errors latch adapter->hw_error (UEs may be spurious on
 * some platforms - see comment below); all findings are logged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* an error was already latched earlier - nothing new to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* mask off UE bits the platform marks as ignorable */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* log the symbolic name of every set UE bit */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2389
8d56ff11
SP
2390static void be_msix_disable(struct be_adapter *adapter)
2391{
ac6a0c4a 2392 if (msix_enabled(adapter)) {
8d56ff11 2393 pci_disable_msix(adapter->pdev);
ac6a0c4a 2394 adapter->num_msix_vec = 0;
3abcdeda
SP
2395 }
2396}
2397
/* Number of RSS rings to aim for: non-zero only when the function
 * advertises RSS capability and the chip is Lancer, or a physical
 * function with SR-IOV not wanted. Capped by the kernel's default
 * RSS queue count.
 */
static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    (lancer_chip(adapter) ||
	     (!sriov_want(adapter) && be_physfn(adapter)))) {
		num = adapter->max_rss_queues;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}
2410
/* Enable MSI-x with enough vectors for the wanted RSS rings (plus RoCE
 * vectors when supported), retrying with however many vectors the
 * system reports available. Returns 0 on success or when falling back
 * to INTx is possible; a failure status on a VF, where INTx is not
 * supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors actually
		 * available - retry the request with that many
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* split the granted vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2462
/* Map an EQ object to its assigned MSI-x vector number. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
6b7c5b94 2468
/* Request an IRQ for each event queue's MSI-x vector.
 * On failure, frees every vector requested so far and disables MSI-x.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind the irqs already requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2492
/* Register the adapter's interrupt handler(s): MSI-x when enabled,
 * otherwise shared INTx on the first EQ. VFs cannot fall back to INTx,
 * so an MSI-x registration failure is fatal for them.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2520
/* Free whatever IRQs be_irq_register() acquired: the shared INTx line
 * or every MSI-x vector. No-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2543
/* Destroy all Rx queues. For each created queue, the FW RXQ is
 * destroyed first, then its CQ is drained and the posted buffers are
 * released (be_rx_cq_clean) before the queue memory is freed.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2559
/* ndo_stop: quiesce the interface - disable NAPI and async MCC, drain
 * Tx completions so all in-flight skbs are freed, tear down Rx queues,
 * sync and drain every EQ, then release the IRQs. Reverse of be_open().
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* wait for any in-flight irq handler before draining */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2596
/* Allocate and create all Rx rings (the default RXQ first, as the FW
 * expects), program the RSS indirection table and flags when multiple
 * rings exist, and post the initial batch of Rx buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the 128-slot indirection table round-robin with
		 * the RSS ring ids
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS flags are set only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2653
/* ndo_open: create the Rx rings, register IRQs, arm all Rx/Tx CQs and
 * EQs, enable NAPI and async MCC processing, then start the Tx queues
 * and report the current link state. Any failure is unwound by
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2696
/* Enable or disable Wake-on-LAN (magic packet). Programs the FW with
 * the netdev MAC (enable) or an all-zero MAC (disable) and sets the
 * PCI D3hot/D3cold wake state to match. Returns 0 on success, -1 if
 * the DMA buffer for the FW command cannot be allocated.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL | __GFP_ZERO);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* a zero MAC disables magic-packet wake in the FW */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2734
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE(review): only the last VF's status is returned; earlier
	 * failures are logged but not propagated.
	 */
	return status;
}
2769
/* Read back each VF's MAC address from the FW into its vf_cfg entry.
 * Returns 0 on success or the first query failure.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return value of get_mac_from_list is
		 * ignored; mac is validated by the query below instead
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2789
/* Tear down SR-IOV: release each VF's MAC and interface and disable
 * SR-IOV at the PCI level, then free the per-VF config array. When VFs
 * are still assigned to guests, only the array is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2816
a54769f5
SP
2817static int be_clear(struct be_adapter *adapter)
2818{
fbc13f01
AK
2819 int i = 1;
2820
191eb756
SP
2821 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2822 cancel_delayed_work_sync(&adapter->work);
2823 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2824 }
2825
11ac75ed 2826 if (sriov_enabled(adapter))
f9449ab7
SP
2827 be_vf_clear(adapter);
2828
fbc13f01
AK
2829 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2830 be_cmd_pmac_del(adapter, adapter->if_handle,
2831 adapter->pmac_id[i], 0);
2832
f9449ab7 2833 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2834
2835 be_mcc_queues_destroy(adapter);
10ef9ab4 2836 be_rx_cqs_destroy(adapter);
a54769f5 2837 be_tx_queues_destroy(adapter);
10ef9ab4 2838 be_evt_queues_destroy(adapter);
a54769f5 2839
abb93951
PR
2840 kfree(adapter->pmac_id);
2841 adapter->pmac_id = NULL;
2842
10ef9ab4 2843 be_msix_disable(adapter);
a54769f5
SP
2844 return 0;
2845}
2846
4c876616 2847static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2848{
4c876616
SP
2849 struct be_vf_cfg *vf_cfg;
2850 u32 cap_flags, en_flags, vf;
abb93951
PR
2851 int status;
2852
4c876616
SP
2853 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2854 BE_IF_FLAGS_MULTICAST;
abb93951 2855
4c876616
SP
2856 for_all_vfs(adapter, vf_cfg, vf) {
2857 if (!BE3_chip(adapter))
a05f99db
VV
2858 be_cmd_get_profile_config(adapter, &cap_flags,
2859 NULL, vf + 1);
4c876616
SP
2860
2861 /* If a FW profile exists, then cap_flags are updated */
2862 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2863 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2864 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2865 &vf_cfg->if_handle, vf + 1);
2866 if (status)
2867 goto err;
2868 }
2869err:
2870 return status;
abb93951
PR
2871}
2872
39f1d94d 2873static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2874{
11ac75ed 2875 struct be_vf_cfg *vf_cfg;
30128031
SP
2876 int vf;
2877
39f1d94d
SP
2878 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2879 GFP_KERNEL);
2880 if (!adapter->vf_cfg)
2881 return -ENOMEM;
2882
11ac75ed
SP
2883 for_all_vfs(adapter, vf_cfg, vf) {
2884 vf_cfg->if_handle = -1;
2885 vf_cfg->pmac_id = -1;
30128031 2886 }
39f1d94d 2887 return 0;
30128031
SP
2888}
2889
f9449ab7
SP
2890static int be_vf_setup(struct be_adapter *adapter)
2891{
11ac75ed 2892 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2893 u16 def_vlan, lnk_speed;
4c876616
SP
2894 int status, old_vfs, vf;
2895 struct device *dev = &adapter->pdev->dev;
39f1d94d 2896
4c876616
SP
2897 old_vfs = be_find_vfs(adapter, ENABLED);
2898 if (old_vfs) {
2899 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2900 if (old_vfs != num_vfs)
2901 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2902 adapter->num_vfs = old_vfs;
39f1d94d 2903 } else {
4c876616
SP
2904 if (num_vfs > adapter->dev_num_vfs)
2905 dev_info(dev, "Device supports %d VFs and not %d\n",
2906 adapter->dev_num_vfs, num_vfs);
2907 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
b4c1df93 2908 if (!adapter->num_vfs)
4c876616 2909 return 0;
39f1d94d
SP
2910 }
2911
2912 status = be_vf_setup_init(adapter);
2913 if (status)
2914 goto err;
30128031 2915
4c876616
SP
2916 if (old_vfs) {
2917 for_all_vfs(adapter, vf_cfg, vf) {
2918 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2919 if (status)
2920 goto err;
2921 }
2922 } else {
2923 status = be_vfs_if_create(adapter);
f9449ab7
SP
2924 if (status)
2925 goto err;
f9449ab7
SP
2926 }
2927
4c876616
SP
2928 if (old_vfs) {
2929 status = be_vfs_mac_query(adapter);
2930 if (status)
2931 goto err;
2932 } else {
39f1d94d
SP
2933 status = be_vf_eth_addr_config(adapter);
2934 if (status)
2935 goto err;
2936 }
f9449ab7 2937
11ac75ed 2938 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2939 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2940 * Allow full available bandwidth
2941 */
2942 if (BE3_chip(adapter) && !old_vfs)
2943 be_cmd_set_qos(adapter, 1000, vf+1);
2944
2945 status = be_cmd_link_status_query(adapter, &lnk_speed,
2946 NULL, vf + 1);
2947 if (!status)
2948 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2949
2950 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2951 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2952 if (status)
2953 goto err;
2954 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2955
2956 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2957 }
b4c1df93
SP
2958
2959 if (!old_vfs) {
2960 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2961 if (status) {
2962 dev_err(dev, "SRIOV enable failed\n");
2963 adapter->num_vfs = 0;
2964 goto err;
2965 }
2966 }
f9449ab7
SP
2967 return 0;
2968err:
4c876616
SP
2969 dev_err(dev, "VF setup failed\n");
2970 be_vf_clear(adapter);
f9449ab7
SP
2971 return status;
2972}
2973
30128031
SP
2974static void be_setup_init(struct be_adapter *adapter)
2975{
2976 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2977 adapter->phy.link_speed = -1;
30128031
SP
2978 adapter->if_handle = -1;
2979 adapter->be3_native = false;
2980 adapter->promiscuous = false;
f25b119c
PR
2981 if (be_physfn(adapter))
2982 adapter->cmd_privileges = MAX_PRIVILEGES;
2983 else
2984 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2985}
2986
1578e777
PR
2987static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2988 bool *active_mac, u32 *pmac_id)
590c391d 2989{
1578e777 2990 int status = 0;
e5e1ee89 2991
1578e777
PR
2992 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2993 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2994 if (!lancer_chip(adapter) && !be_physfn(adapter))
2995 *active_mac = true;
2996 else
2997 *active_mac = false;
e5e1ee89 2998
1578e777
PR
2999 return status;
3000 }
e5e1ee89 3001
1578e777
PR
3002 if (lancer_chip(adapter)) {
3003 status = be_cmd_get_mac_from_list(adapter, mac,
3004 active_mac, pmac_id, 0);
3005 if (*active_mac) {
5ee4979b
SP
3006 status = be_cmd_mac_addr_query(adapter, mac, false,
3007 if_handle, *pmac_id);
1578e777
PR
3008 }
3009 } else if (be_physfn(adapter)) {
3010 /* For BE3, for PF get permanent MAC */
5ee4979b 3011 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 3012 *active_mac = false;
e5e1ee89 3013 } else {
1578e777 3014 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 3015 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
3016 if_handle, 0);
3017 *active_mac = true;
e5e1ee89 3018 }
590c391d
PR
3019 return status;
3020}
3021
abb93951
PR
3022static void be_get_resources(struct be_adapter *adapter)
3023{
4c876616
SP
3024 u16 dev_num_vfs;
3025 int pos, status;
abb93951 3026 bool profile_present = false;
a05f99db 3027 u16 txq_count = 0;
abb93951 3028
4c876616 3029 if (!BEx_chip(adapter)) {
abb93951 3030 status = be_cmd_get_func_config(adapter);
abb93951
PR
3031 if (!status)
3032 profile_present = true;
a05f99db
VV
3033 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3034 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
3035 }
3036
3037 if (profile_present) {
3038 /* Sanity fixes for Lancer */
3039 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3040 BE_UC_PMAC_COUNT);
3041 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3042 BE_NUM_VLANS_SUPPORTED);
3043 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3044 BE_MAX_MC);
3045 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3046 MAX_TX_QS);
3047 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3048 BE3_MAX_RSS_QS);
3049 adapter->max_event_queues = min_t(u16,
3050 adapter->max_event_queues,
3051 BE3_MAX_RSS_QS);
3052
3053 if (adapter->max_rss_queues &&
3054 adapter->max_rss_queues == adapter->max_rx_queues)
3055 adapter->max_rss_queues -= 1;
3056
3057 if (adapter->max_event_queues < adapter->max_rss_queues)
3058 adapter->max_rss_queues = adapter->max_event_queues;
3059
3060 } else {
3061 if (be_physfn(adapter))
3062 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3063 else
3064 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3065
3066 if (adapter->function_mode & FLEX10_MODE)
3067 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3068 else
3069 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3070
3071 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
3072 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3073 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3074 MAX_TX_QS);
abb93951
PR
3075 adapter->max_rss_queues = (adapter->be3_native) ?
3076 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3077 adapter->max_event_queues = BE3_MAX_RSS_QS;
3078
3079 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3080 BE_IF_FLAGS_BROADCAST |
3081 BE_IF_FLAGS_MULTICAST |
3082 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3083 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3084 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3085 BE_IF_FLAGS_PROMISCUOUS;
3086
3087 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3088 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3089 }
4c876616
SP
3090
3091 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3092 if (pos) {
3093 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3094 &dev_num_vfs);
3095 if (BE3_chip(adapter))
3096 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3097 adapter->dev_num_vfs = dev_num_vfs;
3098 }
abb93951
PR
3099}
3100
39f1d94d
SP
3101/* Routine to query per function resource limits */
3102static int be_get_config(struct be_adapter *adapter)
3103{
4c876616 3104 int status;
39f1d94d 3105
abb93951
PR
3106 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3107 &adapter->function_mode,
0ad3157e
VV
3108 &adapter->function_caps,
3109 &adapter->asic_rev);
abb93951
PR
3110 if (status)
3111 goto err;
3112
3113 be_get_resources(adapter);
3114
3115 /* primary mac needs 1 pmac entry */
3116 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3117 sizeof(u32), GFP_KERNEL);
3118 if (!adapter->pmac_id) {
3119 status = -ENOMEM;
3120 goto err;
3121 }
3122
abb93951
PR
3123err:
3124 return status;
39f1d94d
SP
3125}
3126
5fb379ee
SP
3127static int be_setup(struct be_adapter *adapter)
3128{
39f1d94d 3129 struct device *dev = &adapter->pdev->dev;
abb93951 3130 u32 en_flags;
a54769f5 3131 u32 tx_fc, rx_fc;
10ef9ab4 3132 int status;
ba343c77 3133 u8 mac[ETH_ALEN];
1578e777 3134 bool active_mac;
ba343c77 3135
30128031 3136 be_setup_init(adapter);
6b7c5b94 3137
abb93951
PR
3138 if (!lancer_chip(adapter))
3139 be_cmd_req_native_mode(adapter);
39f1d94d 3140
abb93951
PR
3141 status = be_get_config(adapter);
3142 if (status)
3143 goto err;
73d540f2 3144
c2bba3df
SK
3145 status = be_msix_enable(adapter);
3146 if (status)
3147 goto err;
10ef9ab4
SP
3148
3149 status = be_evt_queues_create(adapter);
3150 if (status)
a54769f5 3151 goto err;
6b7c5b94 3152
10ef9ab4
SP
3153 status = be_tx_cqs_create(adapter);
3154 if (status)
3155 goto err;
3156
3157 status = be_rx_cqs_create(adapter);
3158 if (status)
a54769f5 3159 goto err;
6b7c5b94 3160
f9449ab7 3161 status = be_mcc_queues_create(adapter);
10ef9ab4 3162 if (status)
a54769f5 3163 goto err;
6b7c5b94 3164
f25b119c
PR
3165 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3166 /* In UMC mode FW does not return right privileges.
3167 * Override with correct privilege equivalent to PF.
3168 */
3169 if (be_is_mc(adapter))
3170 adapter->cmd_privileges = MAX_PRIVILEGES;
3171
f9449ab7
SP
3172 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3173 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3174
abb93951 3175 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3176 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3177
abb93951 3178 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3179
abb93951 3180 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3181 &adapter->if_handle, 0);
5fb379ee 3182 if (status != 0)
a54769f5 3183 goto err;
6b7c5b94 3184
1578e777
PR
3185 memset(mac, 0, ETH_ALEN);
3186 active_mac = false;
3187 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3188 &active_mac, &adapter->pmac_id[0]);
3189 if (status != 0)
3190 goto err;
3191
3192 if (!active_mac) {
3193 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3194 &adapter->pmac_id[0], 0);
3195 if (status != 0)
3196 goto err;
3197 }
3198
3199 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3200 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3201 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3202 }
0dffc83e 3203
10ef9ab4
SP
3204 status = be_tx_qs_create(adapter);
3205 if (status)
3206 goto err;
3207
eeb65ced 3208 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3209
1d1e9a46 3210 if (adapter->vlans_added)
10329df8 3211 be_vid_config(adapter);
7ab8b0b4 3212
a54769f5 3213 be_set_rx_mode(adapter->netdev);
5fb379ee 3214
ddc3f5cb 3215 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3216
ddc3f5cb
AK
3217 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3218 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3219 adapter->rx_fc);
2dc1deb6 3220
b4c1df93 3221 if (be_physfn(adapter)) {
39f1d94d
SP
3222 if (adapter->dev_num_vfs)
3223 be_vf_setup(adapter);
3224 else
3225 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3226 }
3227
f25b119c
PR
3228 status = be_cmd_get_phy_info(adapter);
3229 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3230 adapter->phy.fc_autoneg = 1;
3231
191eb756
SP
3232 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3233 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3234 return 0;
a54769f5
SP
3235err:
3236 be_clear(adapter);
3237 return status;
3238}
6b7c5b94 3239
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole hook: notify every event queue and schedule its NAPI
 * context so completions are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3255
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Flash-directory cookie, split across two 16-byte words; matched against
 * flash_section_info headers by get_fsec_info() to locate the flash
 * directory inside a UFI image.
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3259static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3260 const u8 *p, u32 img_start, int image_size,
3261 int hdr_size)
fa9a6fed
SB
3262{
3263 u32 crc_offset;
3264 u8 flashed_crc[4];
3265 int status;
3f0d4560
AK
3266
3267 crc_offset = hdr_size + img_start + image_size - 4;
3268
fa9a6fed 3269 p += crc_offset;
3f0d4560
AK
3270
3271 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3272 (image_size - 4));
fa9a6fed
SB
3273 if (status) {
3274 dev_err(&adapter->pdev->dev,
3275 "could not get crc from flash, not flashing redboot\n");
3276 return false;
3277 }
3278
3279 /*update redboot only if crc does not match*/
3280 if (!memcmp(flashed_crc, p, 4))
3281 return false;
3282 else
3283 return true;
fa9a6fed
SB
3284}
3285
306f1348
SP
3286static bool phy_flashing_required(struct be_adapter *adapter)
3287{
42f11cf2
AK
3288 return (adapter->phy.phy_type == TN_8022 &&
3289 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3290}
3291
c165541e
PR
3292static bool is_comp_in_ufi(struct be_adapter *adapter,
3293 struct flash_section_info *fsec, int type)
3294{
3295 int i = 0, img_type = 0;
3296 struct flash_section_info_g2 *fsec_g2 = NULL;
3297
ca34fe38 3298 if (BE2_chip(adapter))
c165541e
PR
3299 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3300
3301 for (i = 0; i < MAX_FLASH_COMP; i++) {
3302 if (fsec_g2)
3303 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3304 else
3305 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3306
3307 if (img_type == type)
3308 return true;
3309 }
3310 return false;
3311
3312}
3313
3314struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3315 int header_size,
3316 const struct firmware *fw)
3317{
3318 struct flash_section_info *fsec = NULL;
3319 const u8 *p = fw->data;
3320
3321 p += header_size;
3322 while (p < (fw->data + fw->size)) {
3323 fsec = (struct flash_section_info *)p;
3324 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3325 return fsec;
3326 p += 32;
3327 }
3328 return NULL;
3329}
3330
773a2d7c
PR
3331static int be_flash(struct be_adapter *adapter, const u8 *img,
3332 struct be_dma_mem *flash_cmd, int optype, int img_size)
3333{
3334 u32 total_bytes = 0, flash_op, num_bytes = 0;
3335 int status = 0;
3336 struct be_cmd_write_flashrom *req = flash_cmd->va;
3337
3338 total_bytes = img_size;
3339 while (total_bytes) {
3340 num_bytes = min_t(u32, 32*1024, total_bytes);
3341
3342 total_bytes -= num_bytes;
3343
3344 if (!total_bytes) {
3345 if (optype == OPTYPE_PHY_FW)
3346 flash_op = FLASHROM_OPER_PHY_FLASH;
3347 else
3348 flash_op = FLASHROM_OPER_FLASH;
3349 } else {
3350 if (optype == OPTYPE_PHY_FW)
3351 flash_op = FLASHROM_OPER_PHY_SAVE;
3352 else
3353 flash_op = FLASHROM_OPER_SAVE;
3354 }
3355
be716446 3356 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3357 img += num_bytes;
3358 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3359 flash_op, num_bytes);
3360 if (status) {
3361 if (status == ILLEGAL_IOCTL_REQ &&
3362 optype == OPTYPE_PHY_FW)
3363 break;
3364 dev_err(&adapter->pdev->dev,
3365 "cmd to write to flash rom failed.\n");
3366 return status;
3367 }
3368 }
3369 return 0;
3370}
3371
0ad3157e 3372/* For BE2, BE3 and BE3-R */
ca34fe38 3373static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3374 const struct firmware *fw,
3375 struct be_dma_mem *flash_cmd,
3376 int num_of_images)
3f0d4560 3377
84517482 3378{
3f0d4560 3379 int status = 0, i, filehdr_size = 0;
c165541e 3380 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3381 const u8 *p = fw->data;
215faf9c 3382 const struct flash_comp *pflashcomp;
773a2d7c 3383 int num_comp, redboot;
c165541e
PR
3384 struct flash_section_info *fsec = NULL;
3385
3386 struct flash_comp gen3_flash_types[] = {
3387 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3388 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3389 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3390 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3391 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3392 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3393 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3394 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3395 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3396 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3397 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3398 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3399 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3400 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3401 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3402 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3403 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3404 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3405 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3406 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3407 };
c165541e
PR
3408
3409 struct flash_comp gen2_flash_types[] = {
3410 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3411 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3412 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3413 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3414 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3415 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3416 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3417 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3418 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3419 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3420 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3421 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3422 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3423 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3424 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3425 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3426 };
3427
ca34fe38 3428 if (BE3_chip(adapter)) {
3f0d4560
AK
3429 pflashcomp = gen3_flash_types;
3430 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3431 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3432 } else {
3433 pflashcomp = gen2_flash_types;
3434 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3435 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3436 }
ca34fe38 3437
c165541e
PR
3438 /* Get flash section info*/
3439 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3440 if (!fsec) {
3441 dev_err(&adapter->pdev->dev,
3442 "Invalid Cookie. UFI corrupted ?\n");
3443 return -1;
3444 }
9fe96934 3445 for (i = 0; i < num_comp; i++) {
c165541e 3446 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3447 continue;
c165541e
PR
3448
3449 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3450 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3451 continue;
3452
773a2d7c
PR
3453 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3454 !phy_flashing_required(adapter))
306f1348 3455 continue;
c165541e 3456
773a2d7c
PR
3457 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3458 redboot = be_flash_redboot(adapter, fw->data,
3459 pflashcomp[i].offset, pflashcomp[i].size,
3460 filehdr_size + img_hdrs_size);
3461 if (!redboot)
3462 continue;
3463 }
c165541e 3464
3f0d4560 3465 p = fw->data;
c165541e 3466 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3467 if (p + pflashcomp[i].size > fw->data + fw->size)
3468 return -1;
773a2d7c
PR
3469
3470 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3471 pflashcomp[i].size);
3472 if (status) {
3473 dev_err(&adapter->pdev->dev,
3474 "Flashing section type %d failed.\n",
3475 pflashcomp[i].img_type);
3476 return status;
84517482 3477 }
84517482 3478 }
84517482
AK
3479 return 0;
3480}
3481
773a2d7c
PR
3482static int be_flash_skyhawk(struct be_adapter *adapter,
3483 const struct firmware *fw,
3484 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3485{
773a2d7c
PR
3486 int status = 0, i, filehdr_size = 0;
3487 int img_offset, img_size, img_optype, redboot;
3488 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3489 const u8 *p = fw->data;
3490 struct flash_section_info *fsec = NULL;
3491
3492 filehdr_size = sizeof(struct flash_file_hdr_g3);
3493 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3494 if (!fsec) {
3495 dev_err(&adapter->pdev->dev,
3496 "Invalid Cookie. UFI corrupted ?\n");
3497 return -1;
3498 }
3499
3500 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3501 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3502 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3503
3504 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3505 case IMAGE_FIRMWARE_iSCSI:
3506 img_optype = OPTYPE_ISCSI_ACTIVE;
3507 break;
3508 case IMAGE_BOOT_CODE:
3509 img_optype = OPTYPE_REDBOOT;
3510 break;
3511 case IMAGE_OPTION_ROM_ISCSI:
3512 img_optype = OPTYPE_BIOS;
3513 break;
3514 case IMAGE_OPTION_ROM_PXE:
3515 img_optype = OPTYPE_PXE_BIOS;
3516 break;
3517 case IMAGE_OPTION_ROM_FCoE:
3518 img_optype = OPTYPE_FCOE_BIOS;
3519 break;
3520 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3521 img_optype = OPTYPE_ISCSI_BACKUP;
3522 break;
3523 case IMAGE_NCSI:
3524 img_optype = OPTYPE_NCSI_FW;
3525 break;
3526 default:
3527 continue;
3528 }
3529
3530 if (img_optype == OPTYPE_REDBOOT) {
3531 redboot = be_flash_redboot(adapter, fw->data,
3532 img_offset, img_size,
3533 filehdr_size + img_hdrs_size);
3534 if (!redboot)
3535 continue;
3536 }
3537
3538 p = fw->data;
3539 p += filehdr_size + img_offset + img_hdrs_size;
3540 if (p + img_size > fw->data + fw->size)
3541 return -1;
3542
3543 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3544 if (status) {
3545 dev_err(&adapter->pdev->dev,
3546 "Flashing section type %d failed.\n",
3547 fsec->fsec_entry[i].type);
3548 return status;
3549 }
3550 }
3551 return 0;
3f0d4560
AK
3552}
3553
f67ef7ba
PR
3554static int lancer_wait_idle(struct be_adapter *adapter)
3555{
3556#define SLIPORT_IDLE_TIMEOUT 30
3557 u32 reg_val;
3558 int status = 0, i;
3559
3560 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3561 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3562 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3563 break;
3564
3565 ssleep(1);
3566 }
3567
3568 if (i == SLIPORT_IDLE_TIMEOUT)
3569 status = -1;
3570
3571 return status;
3572}
3573
3574static int lancer_fw_reset(struct be_adapter *adapter)
3575{
3576 int status = 0;
3577
3578 status = lancer_wait_idle(adapter);
3579 if (status)
3580 return status;
3581
3582 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3583 PHYSDEV_CONTROL_OFFSET);
3584
3585 return status;
3586}
3587
485bf569
SN
3588static int lancer_fw_download(struct be_adapter *adapter,
3589 const struct firmware *fw)
84517482 3590{
485bf569
SN
3591#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3592#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3593 struct be_dma_mem flash_cmd;
485bf569
SN
3594 const u8 *data_ptr = NULL;
3595 u8 *dest_image_ptr = NULL;
3596 size_t image_size = 0;
3597 u32 chunk_size = 0;
3598 u32 data_written = 0;
3599 u32 offset = 0;
3600 int status = 0;
3601 u8 add_status = 0;
f67ef7ba 3602 u8 change_status;
84517482 3603
485bf569 3604 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3605 dev_err(&adapter->pdev->dev,
485bf569
SN
3606 "FW Image not properly aligned. "
3607 "Length must be 4 byte aligned.\n");
3608 status = -EINVAL;
3609 goto lancer_fw_exit;
d9efd2af
SB
3610 }
3611
485bf569
SN
3612 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3613 + LANCER_FW_DOWNLOAD_CHUNK;
3614 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3615 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3616 if (!flash_cmd.va) {
3617 status = -ENOMEM;
485bf569
SN
3618 goto lancer_fw_exit;
3619 }
84517482 3620
485bf569
SN
3621 dest_image_ptr = flash_cmd.va +
3622 sizeof(struct lancer_cmd_req_write_object);
3623 image_size = fw->size;
3624 data_ptr = fw->data;
3625
3626 while (image_size) {
3627 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3628
3629 /* Copy the image chunk content. */
3630 memcpy(dest_image_ptr, data_ptr, chunk_size);
3631
3632 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3633 chunk_size, offset,
3634 LANCER_FW_DOWNLOAD_LOCATION,
3635 &data_written, &change_status,
3636 &add_status);
485bf569
SN
3637 if (status)
3638 break;
3639
3640 offset += data_written;
3641 data_ptr += data_written;
3642 image_size -= data_written;
3643 }
3644
3645 if (!status) {
3646 /* Commit the FW written */
3647 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3648 0, offset,
3649 LANCER_FW_DOWNLOAD_LOCATION,
3650 &data_written, &change_status,
3651 &add_status);
485bf569
SN
3652 }
3653
3654 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3655 flash_cmd.dma);
3656 if (status) {
3657 dev_err(&adapter->pdev->dev,
3658 "Firmware load error. "
3659 "Status code: 0x%x Additional Status: 0x%x\n",
3660 status, add_status);
3661 goto lancer_fw_exit;
3662 }
3663
f67ef7ba
PR
3664 if (change_status == LANCER_FW_RESET_NEEDED) {
3665 status = lancer_fw_reset(adapter);
3666 if (status) {
3667 dev_err(&adapter->pdev->dev,
3668 "Adapter busy for FW reset.\n"
3669 "New FW will not be active.\n");
3670 goto lancer_fw_exit;
3671 }
3672 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3673 dev_err(&adapter->pdev->dev,
3674 "System reboot required for new FW"
3675 " to be active\n");
3676 }
3677
485bf569
SN
3678 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3679lancer_fw_exit:
3680 return status;
3681}
3682
ca34fe38
SP
3683#define UFI_TYPE2 2
3684#define UFI_TYPE3 3
0ad3157e 3685#define UFI_TYPE3R 10
ca34fe38
SP
3686#define UFI_TYPE4 4
3687static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3688 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3689{
3690 if (fhdr == NULL)
3691 goto be_get_ufi_exit;
3692
ca34fe38
SP
3693 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3694 return UFI_TYPE4;
0ad3157e
VV
3695 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3696 if (fhdr->asic_type_rev == 0x10)
3697 return UFI_TYPE3R;
3698 else
3699 return UFI_TYPE3;
3700 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3701 return UFI_TYPE2;
773a2d7c
PR
3702
3703be_get_ufi_exit:
3704 dev_err(&adapter->pdev->dev,
3705 "UFI and Interface are not compatible for flashing\n");
3706 return -1;
3707}
3708
485bf569
SN
3709static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3710{
485bf569
SN
3711 struct flash_file_hdr_g3 *fhdr3;
3712 struct image_hdr *img_hdr_ptr = NULL;
3713 struct be_dma_mem flash_cmd;
3714 const u8 *p;
773a2d7c 3715 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3716
be716446 3717 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3718 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3719 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3720 if (!flash_cmd.va) {
3721 status = -ENOMEM;
485bf569 3722 goto be_fw_exit;
84517482
AK
3723 }
3724
773a2d7c 3725 p = fw->data;
0ad3157e 3726 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3727
0ad3157e 3728 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3729
773a2d7c
PR
3730 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3731 for (i = 0; i < num_imgs; i++) {
3732 img_hdr_ptr = (struct image_hdr *)(fw->data +
3733 (sizeof(struct flash_file_hdr_g3) +
3734 i * sizeof(struct image_hdr)));
3735 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3736 switch (ufi_type) {
3737 case UFI_TYPE4:
773a2d7c
PR
3738 status = be_flash_skyhawk(adapter, fw,
3739 &flash_cmd, num_imgs);
0ad3157e
VV
3740 break;
3741 case UFI_TYPE3R:
ca34fe38
SP
3742 status = be_flash_BEx(adapter, fw, &flash_cmd,
3743 num_imgs);
0ad3157e
VV
3744 break;
3745 case UFI_TYPE3:
3746 /* Do not flash this ufi on BE3-R cards */
3747 if (adapter->asic_rev < 0x10)
3748 status = be_flash_BEx(adapter, fw,
3749 &flash_cmd,
3750 num_imgs);
3751 else {
3752 status = -1;
3753 dev_err(&adapter->pdev->dev,
3754 "Can't load BE3 UFI on BE3R\n");
3755 }
3756 }
3f0d4560 3757 }
773a2d7c
PR
3758 }
3759
ca34fe38
SP
3760 if (ufi_type == UFI_TYPE2)
3761 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3762 else if (ufi_type == -1)
3f0d4560 3763 status = -1;
84517482 3764
2b7bcebf
IV
3765 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3766 flash_cmd.dma);
84517482
AK
3767 if (status) {
3768 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3769 goto be_fw_exit;
84517482
AK
3770 }
3771
af901ca1 3772 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3773
485bf569
SN
3774be_fw_exit:
3775 return status;
3776}
3777
3778int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3779{
3780 const struct firmware *fw;
3781 int status;
3782
3783 if (!netif_running(adapter->netdev)) {
3784 dev_err(&adapter->pdev->dev,
3785 "Firmware load not allowed (interface is down)\n");
3786 return -1;
3787 }
3788
3789 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3790 if (status)
3791 goto fw_exit;
3792
3793 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3794
3795 if (lancer_chip(adapter))
3796 status = lancer_fw_download(adapter, fw);
3797 else
3798 status = be_fw_download(adapter, fw);
3799
eeb65ced
SK
3800 if (!status)
3801 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3802 adapter->fw_on_flash);
3803
84517482
AK
3804fw_exit:
3805 release_firmware(fw);
3806 return status;
3807}
3808
e5686ad8 3809static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3810 .ndo_open = be_open,
3811 .ndo_stop = be_close,
3812 .ndo_start_xmit = be_xmit,
a54769f5 3813 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3814 .ndo_set_mac_address = be_mac_addr_set,
3815 .ndo_change_mtu = be_change_mtu,
ab1594e9 3816 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3817 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3818 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3819 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3820 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3821 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3822 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3823 .ndo_get_vf_config = be_get_vf_config,
3824#ifdef CONFIG_NET_POLL_CONTROLLER
3825 .ndo_poll_controller = be_netpoll,
3826#endif
6b7c5b94
SP
3827};
3828
3829static void be_netdev_init(struct net_device *netdev)
3830{
3831 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3832 struct be_eq_obj *eqo;
3abcdeda 3833 int i;
6b7c5b94 3834
6332c8d3 3835 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 3836 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 3837 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
3838 if (be_multi_rxq(adapter))
3839 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3840
3841 netdev->features |= netdev->hw_features |
f646968f 3842 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 3843
eb8a50d9 3844 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3845 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3846
fbc13f01
AK
3847 netdev->priv_flags |= IFF_UNICAST_FLT;
3848
6b7c5b94
SP
3849 netdev->flags |= IFF_MULTICAST;
3850
b7e5887e 3851 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3852
10ef9ab4 3853 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3854
3855 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3856
10ef9ab4
SP
3857 for_all_evt_queues(adapter, eqo, i)
3858 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3859}
3860
3861static void be_unmap_pci_bars(struct be_adapter *adapter)
3862{
c5b3ad4c
SP
3863 if (adapter->csr)
3864 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3865 if (adapter->db)
ce66f781 3866 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3867}
3868
ce66f781
SP
/* BAR number holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE/Skyhawk physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}
3876
3877static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3878{
dbf0f2a7 3879 if (skyhawk_chip(adapter)) {
ce66f781
SP
3880 adapter->roce_db.size = 4096;
3881 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3882 db_bar(adapter));
3883 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3884 db_bar(adapter));
3885 }
045508a8 3886 return 0;
6b7c5b94
SP
3887}
3888
3889static int be_map_pci_bars(struct be_adapter *adapter)
3890{
3891 u8 __iomem *addr;
ce66f781 3892 u32 sli_intf;
6b7c5b94 3893
ce66f781
SP
3894 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3895 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3896 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3897
c5b3ad4c
SP
3898 if (BEx_chip(adapter) && be_physfn(adapter)) {
3899 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3900 if (adapter->csr == NULL)
3901 return -ENOMEM;
3902 }
3903
ce66f781 3904 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3905 if (addr == NULL)
3906 goto pci_map_err;
ba343c77 3907 adapter->db = addr;
ce66f781
SP
3908
3909 be_roce_map_pci_bars(adapter);
6b7c5b94 3910 return 0;
ce66f781 3911
6b7c5b94
SP
3912pci_map_err:
3913 be_unmap_pci_bars(adapter);
3914 return -ENOMEM;
3915}
3916
6b7c5b94
SP
3917static void be_ctrl_cleanup(struct be_adapter *adapter)
3918{
8788fdc2 3919 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3920
3921 be_unmap_pci_bars(adapter);
3922
3923 if (mem->va)
2b7bcebf
IV
3924 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3925 mem->dma);
e7b909a6 3926
5b8821b7 3927 mem = &adapter->rx_filter;
e7b909a6 3928 if (mem->va)
2b7bcebf
IV
3929 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3930 mem->dma);
6b7c5b94
SP
3931}
3932
6b7c5b94
SP
3933static int be_ctrl_init(struct be_adapter *adapter)
3934{
8788fdc2
SP
3935 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3936 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3937 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 3938 u32 sli_intf;
6b7c5b94 3939 int status;
6b7c5b94 3940
ce66f781
SP
3941 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3942 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3943 SLI_INTF_FAMILY_SHIFT;
3944 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3945
6b7c5b94
SP
3946 status = be_map_pci_bars(adapter);
3947 if (status)
e7b909a6 3948 goto done;
6b7c5b94
SP
3949
3950 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3951 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3952 mbox_mem_alloc->size,
3953 &mbox_mem_alloc->dma,
3954 GFP_KERNEL);
6b7c5b94 3955 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3956 status = -ENOMEM;
3957 goto unmap_pci_bars;
6b7c5b94
SP
3958 }
3959 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3960 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3961 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3962 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3963
5b8821b7
SP
3964 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3965 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
1f9061d2
JP
3966 &rx_filter->dma,
3967 GFP_KERNEL | __GFP_ZERO);
5b8821b7 3968 if (rx_filter->va == NULL) {
e7b909a6
SP
3969 status = -ENOMEM;
3970 goto free_mbox;
3971 }
1f9061d2 3972
2984961c 3973 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3974 spin_lock_init(&adapter->mcc_lock);
3975 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3976
dd131e76 3977 init_completion(&adapter->flash_compl);
cf588477 3978 pci_save_state(adapter->pdev);
6b7c5b94 3979 return 0;
e7b909a6
SP
3980
3981free_mbox:
2b7bcebf
IV
3982 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3983 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3984
3985unmap_pci_bars:
3986 be_unmap_pci_bars(adapter);
3987
3988done:
3989 return status;
6b7c5b94
SP
3990}
3991
3992static void be_stats_cleanup(struct be_adapter *adapter)
3993{
3abcdeda 3994 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3995
3996 if (cmd->va)
2b7bcebf
IV
3997 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3998 cmd->va, cmd->dma);
6b7c5b94
SP
3999}
4000
4001static int be_stats_init(struct be_adapter *adapter)
4002{
3abcdeda 4003 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4004
ca34fe38
SP
4005 if (lancer_chip(adapter))
4006 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4007 else if (BE2_chip(adapter))
89a88ab8 4008 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
4009 else
4010 /* BE3 and Skyhawk */
4011 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4012
2b7bcebf 4013 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
1f9061d2 4014 GFP_KERNEL | __GFP_ZERO);
6b7c5b94
SP
4015 if (cmd->va == NULL)
4016 return -1;
4017 return 0;
4018}
4019
3bc6b06c 4020static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4021{
4022 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4023
6b7c5b94
SP
4024 if (!adapter)
4025 return;
4026
045508a8 4027 be_roce_dev_remove(adapter);
8cef7a78 4028 be_intr_set(adapter, false);
045508a8 4029
f67ef7ba
PR
4030 cancel_delayed_work_sync(&adapter->func_recovery_work);
4031
6b7c5b94
SP
4032 unregister_netdev(adapter->netdev);
4033
5fb379ee
SP
4034 be_clear(adapter);
4035
bf99e50d
PR
4036 /* tell fw we're done with firing cmds */
4037 be_cmd_fw_clean(adapter);
4038
6b7c5b94
SP
4039 be_stats_cleanup(adapter);
4040
4041 be_ctrl_cleanup(adapter);
4042
d6b6d987
SP
4043 pci_disable_pcie_error_reporting(pdev);
4044
6b7c5b94
SP
4045 pci_set_drvdata(pdev, NULL);
4046 pci_release_regions(pdev);
4047 pci_disable_device(pdev);
4048
4049 free_netdev(adapter->netdev);
4050}
4051
4762f6ce
AK
4052bool be_is_wol_supported(struct be_adapter *adapter)
4053{
4054 return ((adapter->wol_cap & BE_WOL_CAP) &&
4055 !be_is_wol_excluded(adapter)) ? true : false;
4056}
4057
941a77d5
SK
4058u32 be_get_fw_log_level(struct be_adapter *adapter)
4059{
4060 struct be_dma_mem extfat_cmd;
4061 struct be_fat_conf_params *cfgs;
4062 int status;
4063 u32 level = 0;
4064 int j;
4065
f25b119c
PR
4066 if (lancer_chip(adapter))
4067 return 0;
4068
941a77d5
SK
4069 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4070 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4071 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4072 &extfat_cmd.dma);
4073
4074 if (!extfat_cmd.va) {
4075 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4076 __func__);
4077 goto err;
4078 }
4079
4080 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4081 if (!status) {
4082 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4083 sizeof(struct be_cmd_resp_hdr));
ac46a462 4084 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
4085 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4086 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4087 }
4088 }
4089 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4090 extfat_cmd.dma);
4091err:
4092 return level;
4093}
abb93951 4094
39f1d94d 4095static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4096{
6b7c5b94 4097 int status;
941a77d5 4098 u32 level;
6b7c5b94 4099
9e1453c5
AK
4100 status = be_cmd_get_cntl_attributes(adapter);
4101 if (status)
4102 return status;
4103
4762f6ce
AK
4104 status = be_cmd_get_acpi_wol_cap(adapter);
4105 if (status) {
4106 /* in case of a failure to get wol capabillities
4107 * check the exclusion list to determine WOL capability */
4108 if (!be_is_wol_excluded(adapter))
4109 adapter->wol_cap |= BE_WOL_CAP;
4110 }
4111
4112 if (be_is_wol_supported(adapter))
4113 adapter->wol = true;
4114
7aeb2156
PR
4115 /* Must be a power of 2 or else MODULO will BUG_ON */
4116 adapter->be_get_temp_freq = 64;
4117
941a77d5
SK
4118 level = be_get_fw_log_level(adapter);
4119 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4120
2243e2e9 4121 return 0;
6b7c5b94
SP
4122}
4123
f67ef7ba 4124static int lancer_recover_func(struct be_adapter *adapter)
d8110f62
PR
4125{
4126 int status;
d8110f62 4127
f67ef7ba
PR
4128 status = lancer_test_and_set_rdy_state(adapter);
4129 if (status)
4130 goto err;
d8110f62 4131
f67ef7ba
PR
4132 if (netif_running(adapter->netdev))
4133 be_close(adapter->netdev);
d8110f62 4134
f67ef7ba
PR
4135 be_clear(adapter);
4136
4137 adapter->hw_error = false;
4138 adapter->fw_timeout = false;
4139
4140 status = be_setup(adapter);
4141 if (status)
4142 goto err;
d8110f62 4143
f67ef7ba
PR
4144 if (netif_running(adapter->netdev)) {
4145 status = be_open(adapter->netdev);
d8110f62
PR
4146 if (status)
4147 goto err;
f67ef7ba 4148 }
d8110f62 4149
f67ef7ba
PR
4150 dev_err(&adapter->pdev->dev,
4151 "Adapter SLIPORT recovery succeeded\n");
4152 return 0;
4153err:
67297ad8
PR
4154 if (adapter->eeh_error)
4155 dev_err(&adapter->pdev->dev,
4156 "Adapter SLIPORT recovery failed\n");
d8110f62 4157
f67ef7ba
PR
4158 return status;
4159}
4160
4161static void be_func_recovery_task(struct work_struct *work)
4162{
4163 struct be_adapter *adapter =
4164 container_of(work, struct be_adapter, func_recovery_work.work);
4165 int status;
d8110f62 4166
f67ef7ba 4167 be_detect_error(adapter);
d8110f62 4168
f67ef7ba 4169 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4170
f67ef7ba
PR
4171 if (adapter->eeh_error)
4172 goto out;
d8110f62 4173
f67ef7ba
PR
4174 rtnl_lock();
4175 netif_device_detach(adapter->netdev);
4176 rtnl_unlock();
d8110f62 4177
f67ef7ba 4178 status = lancer_recover_func(adapter);
d8110f62 4179
f67ef7ba
PR
4180 if (!status)
4181 netif_device_attach(adapter->netdev);
d8110f62 4182 }
f67ef7ba
PR
4183
4184out:
4185 schedule_delayed_work(&adapter->func_recovery_work,
4186 msecs_to_jiffies(1000));
d8110f62
PR
4187}
4188
4189static void be_worker(struct work_struct *work)
4190{
4191 struct be_adapter *adapter =
4192 container_of(work, struct be_adapter, work.work);
4193 struct be_rx_obj *rxo;
10ef9ab4 4194 struct be_eq_obj *eqo;
d8110f62
PR
4195 int i;
4196
d8110f62
PR
4197 /* when interrupts are not yet enabled, just reap any pending
4198 * mcc completions */
4199 if (!netif_running(adapter->netdev)) {
072a9c48 4200 local_bh_disable();
10ef9ab4 4201 be_process_mcc(adapter);
072a9c48 4202 local_bh_enable();
d8110f62
PR
4203 goto reschedule;
4204 }
4205
4206 if (!adapter->stats_cmd_sent) {
4207 if (lancer_chip(adapter))
4208 lancer_cmd_get_pport_stats(adapter,
4209 &adapter->stats_cmd);
4210 else
4211 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4212 }
4213
7aeb2156
PR
4214 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4215 be_cmd_get_die_temperature(adapter);
4216
d8110f62 4217 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
4218 if (rxo->rx_post_starved) {
4219 rxo->rx_post_starved = false;
4220 be_post_rx_frags(rxo, GFP_KERNEL);
4221 }
4222 }
4223
10ef9ab4
SP
4224 for_all_evt_queues(adapter, eqo, i)
4225 be_eqd_update(adapter, eqo);
4226
d8110f62
PR
4227reschedule:
4228 adapter->work_counter++;
4229 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4230}
4231
39f1d94d
SP
4232static bool be_reset_required(struct be_adapter *adapter)
4233{
d79c0a20 4234 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
39f1d94d
SP
4235}
4236
d379142b
SP
4237static char *mc_name(struct be_adapter *adapter)
4238{
4239 if (adapter->function_mode & FLEX10_MODE)
4240 return "FLEX10";
4241 else if (adapter->function_mode & VNIC_MODE)
4242 return "vNIC";
4243 else if (adapter->function_mode & UMC_ENABLED)
4244 return "UMC";
4245 else
4246 return "";
4247}
4248
/* "PF" or "VF" label for the probe banner */
static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
4253
1dd06ae8 4254static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4255{
4256 int status = 0;
4257 struct be_adapter *adapter;
4258 struct net_device *netdev;
b4e32a71 4259 char port_name;
6b7c5b94
SP
4260
4261 status = pci_enable_device(pdev);
4262 if (status)
4263 goto do_none;
4264
4265 status = pci_request_regions(pdev, DRV_NAME);
4266 if (status)
4267 goto disable_dev;
4268 pci_set_master(pdev);
4269
7f640062 4270 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4271 if (netdev == NULL) {
4272 status = -ENOMEM;
4273 goto rel_reg;
4274 }
4275 adapter = netdev_priv(netdev);
4276 adapter->pdev = pdev;
4277 pci_set_drvdata(pdev, adapter);
4278 adapter->netdev = netdev;
2243e2e9 4279 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4280
2b7bcebf 4281 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94 4282 if (!status) {
2bd92cd2
CH
4283 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4284 if (status < 0) {
4285 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4286 goto free_netdev;
4287 }
6b7c5b94
SP
4288 netdev->features |= NETIF_F_HIGHDMA;
4289 } else {
2b7bcebf 4290 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4291 if (status) {
4292 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4293 goto free_netdev;
4294 }
4295 }
4296
d6b6d987
SP
4297 status = pci_enable_pcie_error_reporting(pdev);
4298 if (status)
4299 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4300
6b7c5b94
SP
4301 status = be_ctrl_init(adapter);
4302 if (status)
39f1d94d 4303 goto free_netdev;
6b7c5b94 4304
2243e2e9 4305 /* sync up with fw's ready state */
ba343c77 4306 if (be_physfn(adapter)) {
bf99e50d 4307 status = be_fw_wait_ready(adapter);
ba343c77
SB
4308 if (status)
4309 goto ctrl_clean;
ba343c77 4310 }
6b7c5b94 4311
39f1d94d
SP
4312 if (be_reset_required(adapter)) {
4313 status = be_cmd_reset_function(adapter);
4314 if (status)
4315 goto ctrl_clean;
556ae191 4316
2d177be8
KA
4317 /* Wait for interrupts to quiesce after an FLR */
4318 msleep(100);
4319 }
8cef7a78
SK
4320
4321 /* Allow interrupts for other ULPs running on NIC function */
4322 be_intr_set(adapter, true);
10ef9ab4 4323
2d177be8
KA
4324 /* tell fw we're ready to fire cmds */
4325 status = be_cmd_fw_init(adapter);
4326 if (status)
4327 goto ctrl_clean;
4328
2243e2e9
SP
4329 status = be_stats_init(adapter);
4330 if (status)
4331 goto ctrl_clean;
4332
39f1d94d 4333 status = be_get_initial_config(adapter);
6b7c5b94
SP
4334 if (status)
4335 goto stats_clean;
6b7c5b94
SP
4336
4337 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4338 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4339 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4340
5fb379ee
SP
4341 status = be_setup(adapter);
4342 if (status)
55f5c3c5 4343 goto stats_clean;
2243e2e9 4344
3abcdeda 4345 be_netdev_init(netdev);
6b7c5b94
SP
4346 status = register_netdev(netdev);
4347 if (status != 0)
5fb379ee 4348 goto unsetup;
6b7c5b94 4349
045508a8
PP
4350 be_roce_dev_add(adapter);
4351
f67ef7ba
PR
4352 schedule_delayed_work(&adapter->func_recovery_work,
4353 msecs_to_jiffies(1000));
b4e32a71
PR
4354
4355 be_cmd_query_port_name(adapter, &port_name);
4356
d379142b
SP
4357 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4358 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4359
6b7c5b94
SP
4360 return 0;
4361
5fb379ee
SP
4362unsetup:
4363 be_clear(adapter);
6b7c5b94
SP
4364stats_clean:
4365 be_stats_cleanup(adapter);
4366ctrl_clean:
4367 be_ctrl_cleanup(adapter);
f9449ab7 4368free_netdev:
fe6d2a38 4369 free_netdev(netdev);
8d56ff11 4370 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4371rel_reg:
4372 pci_release_regions(pdev);
4373disable_dev:
4374 pci_disable_device(pdev);
4375do_none:
c4ca2374 4376 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4377 return status;
4378}
4379
4380static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4381{
4382 struct be_adapter *adapter = pci_get_drvdata(pdev);
4383 struct net_device *netdev = adapter->netdev;
4384
71d8d1b5
AK
4385 if (adapter->wol)
4386 be_setup_wol(adapter, true);
4387
f67ef7ba
PR
4388 cancel_delayed_work_sync(&adapter->func_recovery_work);
4389
6b7c5b94
SP
4390 netif_device_detach(netdev);
4391 if (netif_running(netdev)) {
4392 rtnl_lock();
4393 be_close(netdev);
4394 rtnl_unlock();
4395 }
9b0365f1 4396 be_clear(adapter);
6b7c5b94
SP
4397
4398 pci_save_state(pdev);
4399 pci_disable_device(pdev);
4400 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4401 return 0;
4402}
4403
4404static int be_resume(struct pci_dev *pdev)
4405{
4406 int status = 0;
4407 struct be_adapter *adapter = pci_get_drvdata(pdev);
4408 struct net_device *netdev = adapter->netdev;
4409
4410 netif_device_detach(netdev);
4411
4412 status = pci_enable_device(pdev);
4413 if (status)
4414 return status;
4415
4416 pci_set_power_state(pdev, 0);
4417 pci_restore_state(pdev);
4418
2243e2e9
SP
4419 /* tell fw we're ready to fire cmds */
4420 status = be_cmd_fw_init(adapter);
4421 if (status)
4422 return status;
4423
9b0365f1 4424 be_setup(adapter);
6b7c5b94
SP
4425 if (netif_running(netdev)) {
4426 rtnl_lock();
4427 be_open(netdev);
4428 rtnl_unlock();
4429 }
f67ef7ba
PR
4430
4431 schedule_delayed_work(&adapter->func_recovery_work,
4432 msecs_to_jiffies(1000));
6b7c5b94 4433 netif_device_attach(netdev);
71d8d1b5
AK
4434
4435 if (adapter->wol)
4436 be_setup_wol(adapter, false);
a4ca055f 4437
6b7c5b94
SP
4438 return 0;
4439}
4440
82456b03
SP
4441/*
4442 * An FLR will stop BE from DMAing any data.
4443 */
4444static void be_shutdown(struct pci_dev *pdev)
4445{
4446 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4447
2d5d4154
AK
4448 if (!adapter)
4449 return;
82456b03 4450
0f4a6828 4451 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4452 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4453
2d5d4154 4454 netif_device_detach(adapter->netdev);
82456b03 4455
57841869
AK
4456 be_cmd_reset_function(adapter);
4457
82456b03 4458 pci_disable_device(pdev);
82456b03
SP
4459}
4460
cf588477
SP
4461static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4462 pci_channel_state_t state)
4463{
4464 struct be_adapter *adapter = pci_get_drvdata(pdev);
4465 struct net_device *netdev = adapter->netdev;
4466
4467 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4468
f67ef7ba
PR
4469 adapter->eeh_error = true;
4470
4471 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4472
f67ef7ba 4473 rtnl_lock();
cf588477 4474 netif_device_detach(netdev);
f67ef7ba 4475 rtnl_unlock();
cf588477
SP
4476
4477 if (netif_running(netdev)) {
4478 rtnl_lock();
4479 be_close(netdev);
4480 rtnl_unlock();
4481 }
4482 be_clear(adapter);
4483
4484 if (state == pci_channel_io_perm_failure)
4485 return PCI_ERS_RESULT_DISCONNECT;
4486
4487 pci_disable_device(pdev);
4488
eeb7fc7b
SK
4489 /* The error could cause the FW to trigger a flash debug dump.
4490 * Resetting the card while flash dump is in progress
c8a54163
PR
4491 * can cause it not to recover; wait for it to finish.
4492 * Wait only for first function as it is needed only once per
4493 * adapter.
eeb7fc7b 4494 */
c8a54163
PR
4495 if (pdev->devfn == 0)
4496 ssleep(30);
4497
cf588477
SP
4498 return PCI_ERS_RESULT_NEED_RESET;
4499}
4500
4501static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4502{
4503 struct be_adapter *adapter = pci_get_drvdata(pdev);
4504 int status;
4505
4506 dev_info(&adapter->pdev->dev, "EEH reset\n");
f67ef7ba 4507 be_clear_all_error(adapter);
cf588477
SP
4508
4509 status = pci_enable_device(pdev);
4510 if (status)
4511 return PCI_ERS_RESULT_DISCONNECT;
4512
4513 pci_set_master(pdev);
4514 pci_set_power_state(pdev, 0);
4515 pci_restore_state(pdev);
4516
4517 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4518 dev_info(&adapter->pdev->dev,
4519 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4520 status = be_fw_wait_ready(adapter);
cf588477
SP
4521 if (status)
4522 return PCI_ERS_RESULT_DISCONNECT;
4523
d6b6d987 4524 pci_cleanup_aer_uncorrect_error_status(pdev);
cf588477
SP
4525 return PCI_ERS_RESULT_RECOVERED;
4526}
4527
4528static void be_eeh_resume(struct pci_dev *pdev)
4529{
4530 int status = 0;
4531 struct be_adapter *adapter = pci_get_drvdata(pdev);
4532 struct net_device *netdev = adapter->netdev;
4533
4534 dev_info(&adapter->pdev->dev, "EEH resume\n");
4535
4536 pci_save_state(pdev);
4537
2d177be8 4538 status = be_cmd_reset_function(adapter);
cf588477
SP
4539 if (status)
4540 goto err;
4541
2d177be8
KA
4542 /* tell fw we're ready to fire cmds */
4543 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4544 if (status)
4545 goto err;
4546
cf588477
SP
4547 status = be_setup(adapter);
4548 if (status)
4549 goto err;
4550
4551 if (netif_running(netdev)) {
4552 status = be_open(netdev);
4553 if (status)
4554 goto err;
4555 }
f67ef7ba
PR
4556
4557 schedule_delayed_work(&adapter->func_recovery_work,
4558 msecs_to_jiffies(1000));
cf588477
SP
4559 netif_device_attach(netdev);
4560 return;
4561err:
4562 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4563}
4564
3646f0e5 4565static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4566 .error_detected = be_eeh_err_detected,
4567 .slot_reset = be_eeh_reset,
4568 .resume = be_eeh_resume,
4569};
4570
6b7c5b94
SP
4571static struct pci_driver be_driver = {
4572 .name = DRV_NAME,
4573 .id_table = be_dev_ids,
4574 .probe = be_probe,
4575 .remove = be_remove,
4576 .suspend = be_suspend,
cf588477 4577 .resume = be_resume,
82456b03 4578 .shutdown = be_shutdown,
cf588477 4579 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4580};
4581
4582static int __init be_init_module(void)
4583{
8e95a202
JP
4584 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4585 rx_frag_size != 2048) {
6b7c5b94
SP
4586 printk(KERN_WARNING DRV_NAME
4587 " : Module param rx_frag_size must be 2048/4096/8192."
4588 " Using 2048\n");
4589 rx_frag_size = 2048;
4590 }
6b7c5b94
SP
4591
4592 return pci_register_driver(&be_driver);
4593}
4594module_init(be_init_module);
4595
4596static void __exit be_exit_module(void)
4597{
4598 pci_unregister_driver(&be_driver);
4599}
4600module_exit(be_exit_module);
This page took 0.89495 seconds and 5 git commands to generate.