be2net: Wait till resources are available for VF in error recovery
drivers/net/ethernet/emulex/benet/be_main.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
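
/* Both parameters are read-only at runtime (S_IRUGO) and can only be set
 * at module load time, e.g. (illustrative values):
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */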

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}
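
/* Toggle host interrupt delivery by flipping the HOSTINTR bit of the
 * MEMBAR control register, accessed through PCI config space. The write
 * is skipped if the bit already matches the requested state, or during
 * EEH error recovery when touching the device is unsafe.
 */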
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
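
/* Doorbell helpers: each queue is kicked by composing a 32-bit value
 * (ring id, count posted/popped, arm/clear flags) and writing it to the
 * queue's doorbell offset in the db BAR. The wmb() in the RQ/TXQ variants
 * makes posted descriptors visible to the device before the doorbell.
 */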
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
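
/* Fold a 16-bit HW counter into a 32-bit accumulator; a wrap is detected
 * when the new sample is smaller than the accumulator's low half.
 * Worked example: *acc = 0x0001FFF0, val = 0x0005 -> wrapped, so
 * newacc = 0x00010000 + 0x0005 + 0x10000 = 0x00020005.
 */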
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
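
/* Reflect the FW-reported link state in the netdev carrier state; the
 * first call also forces carrier off so the stack starts from a known
 * state.
 */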
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}
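
/* Compose the header WRB that precedes a TX request's data WRBs: LSO and
 * checksum-offload flags, VLAN tag, total WRB count and frame length are
 * all encoded into it via AMAP_SET_BITS.
 */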
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
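
/* DMA-map the skb head and frags and post one WRB per mapped piece (plus
 * the header WRB and an optional dummy WRB). On a mapping failure all
 * WRBs mapped so far are unwound and 0 is returned.
 */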
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
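
/* Software VLAN insertion, used to work around the HW csum bug on VLAN
 * pkts noted in be_xmit(): moves the tag from skb->vlan_tci into the
 * packet data itself.
 */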
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}
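
/* Walk the PCI bus and count this PF's virtual functions by matching each
 * virtfn's physfn against our pdev. Depending on vf_state, returns either
 * all discovered VFs or only those assigned to a guest.
 */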
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
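
/* Adaptive interrupt coalescing: when AIC is enabled, derive a new EQ
 * delay from the RX packet rate sampled once per second,
 * eqd = (pps / 110000) << 3 clamped to [min_eqd, max_eqd], and push it to
 * the device only when it actually changes.
 */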
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
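
/* Per-version parsers that unpack a raw RX completion into the HW-
 * independent be_rx_compl_info; the v1 layout is used on BE3-native
 * adapters and v0 on older ones (see be_rx_compl_get() below).
 */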
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
5fb379ee 1586static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1587{
6b7c5b94
SP
1588 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1589
1590 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1591 return NULL;
1592
f3eb62d2 1593 rmb();
6b7c5b94
SP
1594 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1595
1596 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1597
1598 queue_tail_inc(tx_cq);
1599 return txcp;
1600}
1601
3c8def97
SP
1602static u16 be_tx_compl_process(struct be_adapter *adapter,
1603 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1604{
3c8def97 1605 struct be_queue_info *txq = &txo->q;
a73b796e 1606 struct be_eth_wrb *wrb;
3c8def97 1607 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1608 struct sk_buff *sent_skb;
ec43b1a6
SP
1609 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1610 bool unmap_skb_hdr = true;
6b7c5b94 1611
ec43b1a6 1612 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1613 BUG_ON(!sent_skb);
ec43b1a6
SP
1614 sent_skbs[txq->tail] = NULL;
1615
1616 /* skip header wrb */
a73b796e 1617 queue_tail_inc(txq);
6b7c5b94 1618
ec43b1a6 1619 do {
6b7c5b94 1620 cur_index = txq->tail;
a73b796e 1621 wrb = queue_tail_node(txq);
2b7bcebf
IV
1622 unmap_tx_frag(&adapter->pdev->dev, wrb,
1623 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1624 unmap_skb_hdr = false;
1625
6b7c5b94
SP
1626 num_wrbs++;
1627 queue_tail_inc(txq);
ec43b1a6 1628 } while (cur_index != last_index);
6b7c5b94 1629
6b7c5b94 1630 kfree_skb(sent_skb);
4d586b82 1631 return num_wrbs;
6b7c5b94
SP
1632}
1633
10ef9ab4
SP
1634/* Consume the pending entries in the event queue and return their count */
1635static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1636{
10ef9ab4
SP
1637 struct be_eq_entry *eqe;
1638 int num = 0;
859b1e4e 1639
10ef9ab4
SP
1640 do {
1641 eqe = queue_tail_node(&eqo->q);
1642 if (eqe->evt == 0)
1643 break;
859b1e4e 1644
10ef9ab4
SP
1645 rmb();
1646 eqe->evt = 0;
1647 num++;
1648 queue_tail_inc(&eqo->q);
1649 } while (true);
1650
1651 return num;
859b1e4e
SP
1652}
1653
10ef9ab4 1654static int event_handle(struct be_eq_obj *eqo)
859b1e4e 1655{
10ef9ab4
SP
1656 bool rearm = false;
1657 int num = events_get(eqo);
859b1e4e 1658
10ef9ab4 1659 /* Deal with any spurious interrupts that come without events */
3c8def97
SP
1660 if (!num)
1661 rearm = true;
1662
af311fe3
PR
1663 if (num || msix_enabled(eqo->adapter))
1664 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1665
859b1e4e 1666 if (num)
10ef9ab4 1667 napi_schedule(&eqo->napi);
859b1e4e
SP
1668
1669 return num;
1670}
1671
10ef9ab4
SP
1672/* Leaves the EQ in a disarmed state */
1673static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1674{
10ef9ab4 1675 int num = events_get(eqo);
859b1e4e 1676
10ef9ab4 1677 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1678}
1679
10ef9ab4 1680static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1681{
1682 struct be_rx_page_info *page_info;
3abcdeda
SP
1683 struct be_queue_info *rxq = &rxo->q;
1684 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1685 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
1686 u16 tail;
1687
1688 /* First cleanup pending rx completions */
3abcdeda 1689 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
10ef9ab4
SP
1690 be_rx_compl_discard(rxo, rxcp);
1691 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
6b7c5b94
SP
1692 }
1693
1694 /* Then free posted rx buffers that were not used */
1695 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1696 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1697 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1698 put_page(page_info->page);
1699 memset(page_info, 0, sizeof(*page_info));
1700 }
1701 BUG_ON(atomic_read(&rxq->used));
482c9e79 1702 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1703}
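/* The cleanup above reconstructs the RX ring's tail from its head and
 * the count of still-posted buffers: tail = (head + len - used) % len,
 * i.e. the oldest posted buffer sits 'used' slots behind head, wrapping
 * around the ring. A tiny worked example with assumed numbers:
 */
#include <assert.h>

int main(void)
{
        unsigned int len = 1024, head = 10, used = 30;  /* assumed values */
        unsigned int tail = (head + len - used) % len;

        assert(tail == 1004);   /* 30 slots behind head, wrapped */
        return 0;
}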
1704
0ae57bb3 1705static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1706{
0ae57bb3
SP
1707 struct be_tx_obj *txo;
1708 struct be_queue_info *txq;
a8e9179a 1709 struct be_eth_tx_compl *txcp;
4d586b82 1710 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1711 struct sk_buff *sent_skb;
1712 bool dummy_wrb;
0ae57bb3 1713 int i, pending_txqs;
a8e9179a
SP
1714
1715 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1716 do {
0ae57bb3
SP
1717 pending_txqs = adapter->num_tx_qs;
1718
1719 for_all_tx_queues(adapter, txo, i) {
1720 txq = &txo->q;
1721 while ((txcp = be_tx_compl_get(&txo->cq))) {
1722 end_idx =
1723 AMAP_GET_BITS(struct amap_eth_tx_compl,
1724 wrb_index, txcp);
1725 num_wrbs += be_tx_compl_process(adapter, txo,
1726 end_idx);
1727 cmpl++;
1728 }
1729 if (cmpl) {
1730 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1731 atomic_sub(num_wrbs, &txq->used);
1732 cmpl = 0;
1733 num_wrbs = 0;
1734 }
1735 if (atomic_read(&txq->used) == 0)
1736 pending_txqs--;
a8e9179a
SP
1737 }
1738
0ae57bb3 1739 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1740 break;
1741
1742 mdelay(1);
1743 } while (true);
1744
0ae57bb3
SP
1745 for_all_tx_queues(adapter, txo, i) {
1746 txq = &txo->q;
1747 if (atomic_read(&txq->used))
1748 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1749 atomic_read(&txq->used));
1750
1751 /* free posted tx for which compls will never arrive */
1752 while (atomic_read(&txq->used)) {
1753 sent_skb = txo->sent_skb_list[txq->tail];
1754 end_idx = txq->tail;
1755 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1756 &dummy_wrb);
1757 index_adv(&end_idx, num_wrbs - 1, txq->len);
1758 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1759 atomic_sub(num_wrbs, &txq->used);
1760 }
b03388d6 1761 }
6b7c5b94
SP
1762}
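/* be_tx_compl_clean() bounds its wait by pairing mdelay(1) with a loop
 * counter capped at 200, i.e. roughly 200ms in total. The same bounded
 * polling shape in plain C; condition_met() is a hypothetical stand-in
 * for "all TX queues drained":
 */
#include <stdbool.h>
#include <unistd.h>

static bool condition_met(void);        /* hypothetical predicate */

static bool poll_with_timeout(void)
{
        int timeo = 0;

        do {
                if (condition_met())
                        return true;
                if (++timeo > 200)      /* give up after ~200 x 1ms */
                        return false;
                usleep(1000);           /* 1ms, like mdelay(1) above */
        } while (true);
}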
1763
10ef9ab4
SP
1764static void be_evt_queues_destroy(struct be_adapter *adapter)
1765{
1766 struct be_eq_obj *eqo;
1767 int i;
1768
1769 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1770 if (eqo->q.created) {
1771 be_eq_clean(eqo);
10ef9ab4 1772 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1773 }
10ef9ab4
SP
1774 be_queue_free(adapter, &eqo->q);
1775 }
1776}
1777
1778static int be_evt_queues_create(struct be_adapter *adapter)
1779{
1780 struct be_queue_info *eq;
1781 struct be_eq_obj *eqo;
1782 int i, rc;
1783
1784 adapter->num_evt_qs = num_irqs(adapter);
1785
1786 for_all_evt_queues(adapter, eqo, i) {
1787 eqo->adapter = adapter;
1788 eqo->tx_budget = BE_TX_BUDGET;
1789 eqo->idx = i;
1790 eqo->max_eqd = BE_MAX_EQD;
1791 eqo->enable_aic = true;
1792
1793 eq = &eqo->q;
1794 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1795 sizeof(struct be_eq_entry));
1796 if (rc)
1797 return rc;
1798
1799 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1800 if (rc)
1801 return rc;
1802 }
1cfafab9 1803 return 0;
10ef9ab4
SP
1804}
1805
5fb379ee
SP
1806static void be_mcc_queues_destroy(struct be_adapter *adapter)
1807{
1808 struct be_queue_info *q;
5fb379ee 1809
8788fdc2 1810 q = &adapter->mcc_obj.q;
5fb379ee 1811 if (q->created)
8788fdc2 1812 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1813 be_queue_free(adapter, q);
1814
8788fdc2 1815 q = &adapter->mcc_obj.cq;
5fb379ee 1816 if (q->created)
8788fdc2 1817 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1818 be_queue_free(adapter, q);
1819}
1820
1821/* Must be called only after TX qs are created as MCC shares TX EQ */
1822static int be_mcc_queues_create(struct be_adapter *adapter)
1823{
1824 struct be_queue_info *q, *cq;
5fb379ee 1825
8788fdc2 1826 cq = &adapter->mcc_obj.cq;
5fb379ee 1827 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1828 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1829 goto err;
1830
10ef9ab4
SP
1831 /* Use the default EQ for MCC completions */
1832 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1833 goto mcc_cq_free;
1834
8788fdc2 1835 q = &adapter->mcc_obj.q;
5fb379ee
SP
1836 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1837 goto mcc_cq_destroy;
1838
8788fdc2 1839 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1840 goto mcc_q_free;
1841
1842 return 0;
1843
1844mcc_q_free:
1845 be_queue_free(adapter, q);
1846mcc_cq_destroy:
8788fdc2 1847 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1848mcc_cq_free:
1849 be_queue_free(adapter, cq);
1850err:
1851 return -1;
1852}
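/* be_mcc_queues_create() uses the kernel's usual goto-based unwind: each
 * failure label releases exactly what was set up before it, in reverse
 * order of acquisition. A generic sketch of the pattern (illustrative,
 * with plain malloc/free in place of the queue helpers):
 */
#include <stdlib.h>

static int setup_two_resources(void **a, void **b)
{
        *a = malloc(64);        /* first resource */
        if (!*a)
                goto err;

        *b = malloc(64);        /* second resource */
        if (!*b)
                goto free_a;    /* undo only what already succeeded */

        return 0;

free_a:
        free(*a);
err:
        return -1;
}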
1853
6b7c5b94
SP
1854static void be_tx_queues_destroy(struct be_adapter *adapter)
1855{
1856 struct be_queue_info *q;
3c8def97
SP
1857 struct be_tx_obj *txo;
1858 u8 i;
6b7c5b94 1859
3c8def97
SP
1860 for_all_tx_queues(adapter, txo, i) {
1861 q = &txo->q;
1862 if (q->created)
1863 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1864 be_queue_free(adapter, q);
6b7c5b94 1865
3c8def97
SP
1866 q = &txo->cq;
1867 if (q->created)
1868 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1869 be_queue_free(adapter, q);
1870 }
6b7c5b94
SP
1871}
1872
dafc0fe3
SP
1873static int be_num_txqs_want(struct be_adapter *adapter)
1874{
abb93951
PR
1875 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1876 be_is_mc(adapter) ||
1877 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
39f1d94d 1878 adapter->generation == BE_GEN2)
dafc0fe3
SP
1879 return 1;
1880 else
abb93951 1881 return adapter->max_tx_queues;
dafc0fe3
SP
1882}
1883
10ef9ab4 1884static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1885{
10ef9ab4
SP
1886 struct be_queue_info *cq, *eq;
1887 int status;
3c8def97
SP
1888 struct be_tx_obj *txo;
1889 u8 i;
6b7c5b94 1890
dafc0fe3 1891 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
1892 if (adapter->num_tx_qs != MAX_TX_QS) {
1893 rtnl_lock();
dafc0fe3
SP
1894 netif_set_real_num_tx_queues(adapter->netdev,
1895 adapter->num_tx_qs);
3bb62f4f
PR
1896 rtnl_unlock();
1897 }
dafc0fe3 1898
10ef9ab4
SP
1899 for_all_tx_queues(adapter, txo, i) {
1900 cq = &txo->cq;
1901 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1902 sizeof(struct be_eth_tx_compl));
1903 if (status)
1904 return status;
3c8def97 1905
10ef9ab4
SP
1906 /* If num_evt_qs is less than num_tx_qs, then more than
1907 * one txq shares an eq
1908 */
1909 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1910 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1911 if (status)
1912 return status;
1913 }
1914 return 0;
1915}
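/* When there are more TX queues than event queues, txq i is attached to
 * eq (i % num_evt_qs); be_poll() later inverts this by stepping from
 * eqo->idx in strides of num_evt_qs. A quick sketch of both views of
 * the same mapping, with assumed counts:
 */
#include <stdio.h>

int main(void)
{
        int num_txqs = 8, num_eqs = 3;  /* assumed for illustration */
        int i, eq;

        for (i = 0; i < num_txqs; i++)  /* creation-time view */
                printf("txq %d -> eq %d\n", i, i % num_eqs);

        eq = 1;                         /* poll-time view for one EQ */
        for (i = eq; i < num_txqs; i += num_eqs)
                printf("eq %d services txq %d\n", eq, i);
        return 0;
}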
6b7c5b94 1916
10ef9ab4
SP
1917static int be_tx_qs_create(struct be_adapter *adapter)
1918{
1919 struct be_tx_obj *txo;
1920 int i, status;
fe6d2a38 1921
3c8def97 1922 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
1923 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1924 sizeof(struct be_eth_wrb));
1925 if (status)
1926 return status;
6b7c5b94 1927
10ef9ab4
SP
1928 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1929 if (status)
1930 return status;
3c8def97 1931 }
6b7c5b94 1932
d379142b
SP
1933 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1934 adapter->num_tx_qs);
10ef9ab4 1935 return 0;
6b7c5b94
SP
1936}
1937
10ef9ab4 1938static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
1939{
1940 struct be_queue_info *q;
3abcdeda
SP
1941 struct be_rx_obj *rxo;
1942 int i;
1943
1944 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
1945 q = &rxo->cq;
1946 if (q->created)
1947 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1948 be_queue_free(adapter, q);
ac6a0c4a
SP
1949 }
1950}
1951
10ef9ab4 1952static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1953{
10ef9ab4 1954 struct be_queue_info *eq, *cq;
3abcdeda
SP
1955 struct be_rx_obj *rxo;
1956 int rc, i;
6b7c5b94 1957
10ef9ab4
SP
1958 /* We'll create as many RSS rings as there are irqs.
1959 * But when there's only one irq there's no use creating RSS rings
1960 */
1961 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1962 num_irqs(adapter) + 1 : 1;
7f640062
SP
1963 if (adapter->num_rx_qs != MAX_RX_QS) {
1964 rtnl_lock();
1965 netif_set_real_num_rx_queues(adapter->netdev,
1966 adapter->num_rx_qs);
1967 rtnl_unlock();
1968 }
ac6a0c4a 1969
6b7c5b94 1970 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
1971 for_all_rx_queues(adapter, rxo, i) {
1972 rxo->adapter = adapter;
3abcdeda
SP
1973 cq = &rxo->cq;
1974 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1975 sizeof(struct be_eth_rx_compl));
1976 if (rc)
10ef9ab4 1977 return rc;
3abcdeda 1978
10ef9ab4
SP
1979 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1980 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 1981 if (rc)
10ef9ab4 1982 return rc;
3abcdeda 1983 }
6b7c5b94 1984
d379142b
SP
1985 dev_info(&adapter->pdev->dev,
1986 "created %d RSS queue(s) and 1 default RX queue\n",
1987 adapter->num_rx_qs - 1);
10ef9ab4 1988 return 0;
b628bde2
SP
1989}
1990
6b7c5b94
SP
1991static irqreturn_t be_intx(int irq, void *dev)
1992{
1993 struct be_adapter *adapter = dev;
10ef9ab4 1994 int num_evts;
6b7c5b94 1995
10ef9ab4
SP
1996 /* With INTx only one EQ is used */
1997 num_evts = event_handle(&adapter->eq_obj[0]);
1998 if (num_evts)
1999 return IRQ_HANDLED;
2000 else
2001 return IRQ_NONE;
6b7c5b94
SP
2002}
2003
10ef9ab4 2004static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2005{
10ef9ab4 2006 struct be_eq_obj *eqo = dev;
6b7c5b94 2007
10ef9ab4 2008 event_handle(eqo);
6b7c5b94
SP
2009 return IRQ_HANDLED;
2010}
2011
2e588f84 2012static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2013{
2e588f84 2014 return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
2015}
2016
10ef9ab4
SP
2017static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2018 int budget)
6b7c5b94 2019{
3abcdeda
SP
2020 struct be_adapter *adapter = rxo->adapter;
2021 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2022 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2023 u32 work_done;
2024
2025 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2026 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2027 if (!rxcp)
2028 break;
2029
12004ae9
SP
2030 /* Is it a flush compl that has no data? */
2031 if (unlikely(rxcp->num_rcvd == 0))
2032 goto loop_continue;
2033
2034 /* Discard compls with partial DMA (Lancer B0) */
2035 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2036 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2037 goto loop_continue;
2038 }
2039
2040 /* On BE drop pkts that arrive due to imperfect filtering in
2041 * promiscuous mode on some SKUs
2042 */
2043 if (unlikely(rxcp->port != adapter->port_num &&
2044 !lancer_chip(adapter))) {
10ef9ab4 2045 be_rx_compl_discard(rxo, rxcp);
12004ae9 2046 goto loop_continue;
64642811 2047 }
009dd872 2048
12004ae9 2049 if (do_gro(rxcp))
10ef9ab4 2050 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2051 else
10ef9ab4 2052 be_rx_compl_process(rxo, rxcp);
12004ae9 2053loop_continue:
2e588f84 2054 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2055 }
2056
10ef9ab4
SP
2057 if (work_done) {
2058 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2059
10ef9ab4
SP
2060 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2061 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2062 }
10ef9ab4 2063
6b7c5b94
SP
2064 return work_done;
2065}
2066
10ef9ab4
SP
2067static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2068 int budget, int idx)
6b7c5b94 2069{
6b7c5b94 2070 struct be_eth_tx_compl *txcp;
10ef9ab4 2071 int num_wrbs = 0, work_done;
3c8def97 2072
10ef9ab4
SP
2073 for (work_done = 0; work_done < budget; work_done++) {
2074 txcp = be_tx_compl_get(&txo->cq);
2075 if (!txcp)
2076 break;
2077 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2078 AMAP_GET_BITS(struct amap_eth_tx_compl,
2079 wrb_index, txcp));
10ef9ab4 2080 }
6b7c5b94 2081
10ef9ab4
SP
2082 if (work_done) {
2083 be_cq_notify(adapter, txo->cq.id, true, work_done);
2084 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2085
10ef9ab4
SP
2086 /* As Tx wrbs have been freed up, wake up netdev queue
2087 * if it was stopped due to lack of tx wrbs. */
2088 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2089 atomic_read(&txo->q.used) < txo->q.len / 2) {
2090 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2091 }
10ef9ab4
SP
2092
2093 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2094 tx_stats(txo)->tx_compl += work_done;
2095 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2096 }
10ef9ab4
SP
2097 return (work_done < budget); /* Done */
2098}
6b7c5b94 2099
10ef9ab4
SP
2100int be_poll(struct napi_struct *napi, int budget)
2101{
2102 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2103 struct be_adapter *adapter = eqo->adapter;
2104 int max_work = 0, work, i;
2105 bool tx_done;
f31e50a8 2106
10ef9ab4
SP
2107 /* Process all TXQs serviced by this EQ */
2108 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2109 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2110 eqo->tx_budget, i);
2111 if (!tx_done)
2112 max_work = budget;
f31e50a8
SP
2113 }
2114
10ef9ab4
SP
2115 /* This loop iterates twice for EQ0, on which completions
2116 * of the last RXQ (the default one) are also processed.
2117 * For other EQs the loop iterates only once.
2118 */
2119 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2120 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2121 max_work = max(work, max_work);
2122 }
6b7c5b94 2123
10ef9ab4
SP
2124 if (is_mcc_eqo(eqo))
2125 be_process_mcc(adapter);
93c86700 2126
10ef9ab4
SP
2127 if (max_work < budget) {
2128 napi_complete(napi);
2129 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2130 } else {
2131 /* As we'll continue in polling mode, count and clear events */
2132 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
93c86700 2133 }
10ef9ab4 2134 return max_work;
6b7c5b94
SP
2135}
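/* be_poll() follows the standard NAPI contract: if the budget was not
 * exhausted, complete NAPI and re-arm the EQ interrupt; otherwise stay
 * in polling mode and only acknowledge the consumed events. A schematic
 * sketch of that decision; the demo_* helpers are made-up placeholders,
 * not kernel interfaces:
 */
static void demo_napi_complete(void);   /* hypothetical helpers */
static void demo_irq_rearm(void);
static void demo_events_ack(void);

static int demo_poll(int budget, int work_done)
{
        if (work_done < budget) {
                demo_napi_complete();   /* leave polling mode */
                demo_irq_rearm();       /* interrupts take over again */
        } else {
                demo_events_ack();      /* stay polled; just ack events */
        }
        return work_done;
}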
2136
f67ef7ba 2137void be_detect_error(struct be_adapter *adapter)
7c185276 2138{
e1cfb67a
PR
2139 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2140 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2141 u32 i;
2142
f67ef7ba 2143 if (be_crit_error(adapter))
72f02485
SP
2144 return;
2145
e1cfb67a
PR
2146 if (lancer_chip(adapter)) {
2147 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2148 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2149 sliport_err1 = ioread32(adapter->db +
2150 SLIPORT_ERROR1_OFFSET);
2151 sliport_err2 = ioread32(adapter->db +
2152 SLIPORT_ERROR2_OFFSET);
2153 }
2154 } else {
2155 pci_read_config_dword(adapter->pdev,
2156 PCICFG_UE_STATUS_LOW, &ue_lo);
2157 pci_read_config_dword(adapter->pdev,
2158 PCICFG_UE_STATUS_HIGH, &ue_hi);
2159 pci_read_config_dword(adapter->pdev,
2160 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2161 pci_read_config_dword(adapter->pdev,
2162 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2163
f67ef7ba
PR
2164 ue_lo = (ue_lo & ~ue_lo_mask);
2165 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2166 }
7c185276 2167
1451ae6e
AK
2168 /* On certain platforms BE hardware can indicate spurious UEs.
2169 * In case of a real UE the h/w is allowed to stop working completely;
2170 * hence hw_error is not set on UE detection.
2171 */
2172 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2173 adapter->hw_error = true;
434b3648 2174 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2175 "Error detected in the card\n");
2176 }
2177
2178 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2179 dev_err(&adapter->pdev->dev,
2180 "ERR: sliport status 0x%x\n", sliport_status);
2181 dev_err(&adapter->pdev->dev,
2182 "ERR: sliport error1 0x%x\n", sliport_err1);
2183 dev_err(&adapter->pdev->dev,
2184 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2185 }
2186
e1cfb67a
PR
2187 if (ue_lo) {
2188 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2189 if (ue_lo & 1)
7c185276
AK
2190 dev_err(&adapter->pdev->dev,
2191 "UE: %s bit set\n", ue_status_low_desc[i]);
2192 }
2193 }
f67ef7ba 2194
e1cfb67a
PR
2195 if (ue_hi) {
2196 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2197 if (ue_hi & 1)
7c185276
AK
2198 dev_err(&adapter->pdev->dev,
2199 "UE: %s bit set\n", ue_status_hi_desc[i]);
2200 }
2201 }
2202
2203}
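/* The UE decode above walks each status word bit by bit, shifting right
 * until no set bits remain and printing the description that matches
 * each set bit's position. The same walk as a standalone sketch with a
 * truncated description table and an assumed sample value:
 */
#include <stdio.h>

int main(void)
{
        const char *desc[] = { "CEV", "CTX", "DBUF", "ERX" };  /* truncated */
        unsigned int ue = 0x5;  /* assumed sample: bits 0 and 2 set */
        unsigned int i;

        for (i = 0; ue; ue >>= 1, i++)
                if (ue & 1)
                        printf("UE: %s bit set\n", desc[i]);
        return 0;
}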
2204
8d56ff11
SP
2205static void be_msix_disable(struct be_adapter *adapter)
2206{
ac6a0c4a 2207 if (msix_enabled(adapter)) {
8d56ff11 2208 pci_disable_msix(adapter->pdev);
ac6a0c4a 2209 adapter->num_msix_vec = 0;
3abcdeda
SP
2210 }
2211}
2212
10ef9ab4
SP
2213static uint be_num_rss_want(struct be_adapter *adapter)
2214{
30e80b55 2215 u32 num = 0;
abb93951 2216
10ef9ab4 2217 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2218 (lancer_chip(adapter) ||
2219 (!sriov_want(adapter) && be_physfn(adapter)))) {
2220 num = adapter->max_rss_queues;
30e80b55
YM
2221 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2222 }
2223 return num;
10ef9ab4
SP
2224}
2225
6b7c5b94
SP
2226static void be_msix_enable(struct be_adapter *adapter)
2227{
10ef9ab4 2228#define BE_MIN_MSIX_VECTORS 1
045508a8 2229 int i, status, num_vec, num_roce_vec = 0;
d379142b 2230 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2231
10ef9ab4
SP
2232 /* If RSS queues are not used, need a vec for default RX Q */
2233 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2234 if (be_roce_supported(adapter)) {
2235 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2236 (num_online_cpus() + 1));
2237 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2238 num_vec += num_roce_vec;
2239 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2240 }
10ef9ab4 2241 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2242
ac6a0c4a 2243 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2244 adapter->msix_entries[i].entry = i;
2245
ac6a0c4a 2246 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2247 if (status == 0) {
2248 goto done;
2249 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2250 num_vec = status;
3abcdeda 2251 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2252 num_vec) == 0)
3abcdeda 2253 goto done;
3abcdeda 2254 }
d379142b
SP
2255
2256 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2257 return;
2258done:
045508a8
PP
2259 if (be_roce_supported(adapter)) {
2260 if (num_vec > num_roce_vec) {
2261 adapter->num_msix_vec = num_vec - num_roce_vec;
2262 adapter->num_msix_roce_vec =
2263 num_vec - adapter->num_msix_vec;
2264 } else {
2265 adapter->num_msix_vec = num_vec;
2266 adapter->num_msix_roce_vec = 0;
2267 }
2268 } else
2269 adapter->num_msix_vec = num_vec;
d379142b 2270 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2271 return;
6b7c5b94
SP
2272}
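/* On kernels of this vintage, pci_enable_msix() returns 0 on success or
 * a positive count of vectors that could still be allocated, and
 * be_msix_enable() retries once with that smaller count before falling
 * back to INTx. A sketch of the retry shape; try_enable() is a
 * hypothetical stand-in with the same return convention:
 */
static int try_enable(int num_vec);     /* 0 = ok, >0 = retry count, <0 = fail */

static int enable_with_fallback(int want, int min)
{
        int rc = try_enable(want);

        if (rc == 0)
                return want;            /* got everything we asked for */
        if (rc >= min && try_enable(rc) == 0)
                return rc;              /* settled for fewer vectors */
        return 0;                       /* caller falls back to INTx */
}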
2273
fe6d2a38 2274static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2275 struct be_eq_obj *eqo)
b628bde2 2276{
10ef9ab4 2277 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2278}
6b7c5b94 2279
b628bde2
SP
2280static int be_msix_register(struct be_adapter *adapter)
2281{
10ef9ab4
SP
2282 struct net_device *netdev = adapter->netdev;
2283 struct be_eq_obj *eqo;
2284 int status, i, vec;
6b7c5b94 2285
10ef9ab4
SP
2286 for_all_evt_queues(adapter, eqo, i) {
2287 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2288 vec = be_msix_vec_get(adapter, eqo);
2289 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2290 if (status)
2291 goto err_msix;
2292 }
b628bde2 2293
6b7c5b94 2294 return 0;
3abcdeda 2295err_msix:
10ef9ab4
SP
2296 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2297 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2298 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2299 status);
ac6a0c4a 2300 be_msix_disable(adapter);
6b7c5b94
SP
2301 return status;
2302}
2303
2304static int be_irq_register(struct be_adapter *adapter)
2305{
2306 struct net_device *netdev = adapter->netdev;
2307 int status;
2308
ac6a0c4a 2309 if (msix_enabled(adapter)) {
6b7c5b94
SP
2310 status = be_msix_register(adapter);
2311 if (status == 0)
2312 goto done;
ba343c77
SB
2313 /* INTx is not supported for VF */
2314 if (!be_physfn(adapter))
2315 return status;
6b7c5b94
SP
2316 }
2317
2318 /* INTx */
2319 netdev->irq = adapter->pdev->irq;
2320 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2321 adapter);
2322 if (status) {
2323 dev_err(&adapter->pdev->dev,
2324 "INTx request IRQ failed - err %d\n", status);
2325 return status;
2326 }
2327done:
2328 adapter->isr_registered = true;
2329 return 0;
2330}
2331
2332static void be_irq_unregister(struct be_adapter *adapter)
2333{
2334 struct net_device *netdev = adapter->netdev;
10ef9ab4 2335 struct be_eq_obj *eqo;
3abcdeda 2336 int i;
6b7c5b94
SP
2337
2338 if (!adapter->isr_registered)
2339 return;
2340
2341 /* INTx */
ac6a0c4a 2342 if (!msix_enabled(adapter)) {
6b7c5b94
SP
2343 free_irq(netdev->irq, adapter);
2344 goto done;
2345 }
2346
2347 /* MSIx */
10ef9ab4
SP
2348 for_all_evt_queues(adapter, eqo, i)
2349 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2350
6b7c5b94
SP
2351done:
2352 adapter->isr_registered = false;
6b7c5b94
SP
2353}
2354
10ef9ab4 2355static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2356{
2357 struct be_queue_info *q;
2358 struct be_rx_obj *rxo;
2359 int i;
2360
2361 for_all_rx_queues(adapter, rxo, i) {
2362 q = &rxo->q;
2363 if (q->created) {
2364 be_cmd_rxq_destroy(adapter, q);
2365 /* After the rxq is invalidated, wait for a grace time
2366 * of 1ms for all DMA to end and the flush compl to
2367 * arrive
2368 */
2369 mdelay(1);
10ef9ab4 2370 be_rx_cq_clean(rxo);
482c9e79 2371 }
10ef9ab4 2372 be_queue_free(adapter, q);
482c9e79
SP
2373 }
2374}
2375
889cd4b2
SP
2376static int be_close(struct net_device *netdev)
2377{
2378 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2379 struct be_eq_obj *eqo;
2380 int i;
889cd4b2 2381
045508a8
PP
2382 be_roce_dev_close(adapter);
2383
889cd4b2
SP
2384 be_async_mcc_disable(adapter);
2385
fe6d2a38
SP
2386 if (!lancer_chip(adapter))
2387 be_intr_set(adapter, false);
889cd4b2 2388
10ef9ab4
SP
2389 for_all_evt_queues(adapter, eqo, i) {
2390 napi_disable(&eqo->napi);
2391 if (msix_enabled(adapter))
2392 synchronize_irq(be_msix_vec_get(adapter, eqo));
2393 else
2394 synchronize_irq(netdev->irq);
2395 be_eq_clean(eqo);
63fcb27f
PR
2396 }
2397
889cd4b2
SP
2398 be_irq_unregister(adapter);
2399
889cd4b2
SP
2400 /* Wait for all pending tx completions to arrive so that
2401 * all tx skbs are freed.
2402 */
0ae57bb3 2403 be_tx_compl_clean(adapter);
889cd4b2 2404
10ef9ab4 2405 be_rx_qs_destroy(adapter);
482c9e79
SP
2406 return 0;
2407}
2408
10ef9ab4 2409static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2410{
2411 struct be_rx_obj *rxo;
e9008ee9
PR
2412 int rc, i, j;
2413 u8 rsstable[128];
482c9e79
SP
2414
2415 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2416 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2417 sizeof(struct be_eth_rx_d));
2418 if (rc)
2419 return rc;
2420 }
2421
2422 /* The FW would like the default RXQ to be created first */
2423 rxo = default_rxo(adapter);
2424 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2425 adapter->if_handle, false, &rxo->rss_id);
2426 if (rc)
2427 return rc;
2428
2429 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2430 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2431 rx_frag_size, adapter->if_handle,
2432 true, &rxo->rss_id);
482c9e79
SP
2433 if (rc)
2434 return rc;
2435 }
2436
2437 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2438 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2439 for_all_rss_queues(adapter, rxo, i) {
2440 if ((j + i) >= 128)
2441 break;
2442 rsstable[j + i] = rxo->rss_id;
2443 }
2444 }
2445 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2446 if (rc)
2447 return rc;
2448 }
2449
2450 /* First time posting */
10ef9ab4 2451 for_all_rx_queues(adapter, rxo, i)
482c9e79 2452 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2453 return 0;
2454}
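/* The nested loop above fills the 128-entry RSS indirection table
 * round-robin with the RSS ring ids, so flows hash evenly across rings.
 * A standalone sketch, assuming 4 RSS rings whose ids are simply 0..3:
 */
#include <stdio.h>

int main(void)
{
        unsigned char rsstable[128];
        int num_rss = 4;        /* assumed ring count */
        int i, j;

        for (j = 0; j < 128; j += num_rss)
                for (i = 0; i < num_rss; i++) {
                        if (j + i >= 128)
                                break;
                        rsstable[j + i] = i;    /* ids repeat 0,1,2,3,... */
                }

        printf("entry 0 -> ring %d, entry 5 -> ring %d\n",
               rsstable[0], rsstable[5]);
        return 0;
}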
2455
6b7c5b94
SP
2456static int be_open(struct net_device *netdev)
2457{
2458 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2459 struct be_eq_obj *eqo;
3abcdeda 2460 struct be_rx_obj *rxo;
10ef9ab4 2461 struct be_tx_obj *txo;
b236916a 2462 u8 link_status;
3abcdeda 2463 int status, i;
5fb379ee 2464
10ef9ab4 2465 status = be_rx_qs_create(adapter);
482c9e79
SP
2466 if (status)
2467 goto err;
2468
5fb379ee
SP
2469 be_irq_register(adapter);
2470
fe6d2a38
SP
2471 if (!lancer_chip(adapter))
2472 be_intr_set(adapter, true);
5fb379ee 2473
10ef9ab4 2474 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2475 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2476
10ef9ab4
SP
2477 for_all_tx_queues(adapter, txo, i)
2478 be_cq_notify(adapter, txo->cq.id, true, 0);
2479
7a1e9b20
SP
2480 be_async_mcc_enable(adapter);
2481
10ef9ab4
SP
2482 for_all_evt_queues(adapter, eqo, i) {
2483 napi_enable(&eqo->napi);
2484 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2485 }
2486
323ff71e 2487 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2488 if (!status)
2489 be_link_status_update(adapter, link_status);
2490
045508a8 2491 be_roce_dev_open(adapter);
889cd4b2
SP
2492 return 0;
2493err:
2494 be_close(adapter->netdev);
2495 return -EIO;
5fb379ee
SP
2496}
2497
71d8d1b5
AK
2498static int be_setup_wol(struct be_adapter *adapter, bool enable)
2499{
2500 struct be_dma_mem cmd;
2501 int status = 0;
2502 u8 mac[ETH_ALEN];
2503
2504 memset(mac, 0, ETH_ALEN);
2505
2506 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2507 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2508 GFP_KERNEL);
71d8d1b5
AK
2509 if (cmd.va == NULL)
2510 return -1;
2511 memset(cmd.va, 0, cmd.size);
2512
2513 if (enable) {
2514 status = pci_write_config_dword(adapter->pdev,
2515 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2516 if (status) {
2517 dev_err(&adapter->pdev->dev,
2381a55c 2518 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2519 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2520 cmd.dma);
71d8d1b5
AK
2521 return status;
2522 }
2523 status = be_cmd_enable_magic_wol(adapter,
2524 adapter->netdev->dev_addr, &cmd);
2525 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2526 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2527 } else {
2528 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2529 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2530 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2531 }
2532
2b7bcebf 2533 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2534 return status;
2535}
2536
6d87f5c3
AK
2537/*
2538 * Generate a seed MAC address from the PF MAC address using jhash.
2539 * MAC addresses for VFs are assigned incrementally starting from the seed.
2540 * These addresses are programmed into the ASIC by the PF, and the VF
2541 * driver queries for its MAC address during probe.
2542 */
2543static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2544{
f9449ab7 2545 u32 vf;
3abcdeda 2546 int status = 0;
6d87f5c3 2547 u8 mac[ETH_ALEN];
11ac75ed 2548 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2549
2550 be_vf_eth_addr_generate(adapter, mac);
2551
11ac75ed 2552 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2553 if (lancer_chip(adapter)) {
2554 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2555 } else {
2556 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2557 vf_cfg->if_handle,
2558 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2559 }
2560
6d87f5c3
AK
2561 if (status)
2562 dev_err(&adapter->pdev->dev,
590c391d 2563 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2564 else
11ac75ed 2565 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2566
2567 mac[5] += 1;
2568 }
2569 return status;
2570}
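/* VF MAC addresses are derived by bumping only the last octet of the
 * seed (mac[5] += 1), so the scheme accommodates at most 255 increments
 * before the octet wraps with no carry into mac[4]. A tiny sketch of
 * the increment, using an assumed seed address:
 */
#include <stdio.h>

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 }; /* assumed */
        int vf;

        for (vf = 0; vf < 3; vf++) {
                printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
                       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
                mac[5] += 1;    /* wraps at 0xff without carrying */
        }
        return 0;
}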
2571
f9449ab7 2572static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2573{
11ac75ed 2574 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2575 u32 vf;
2576
39f1d94d
SP
2577 if (be_find_vfs(adapter, ASSIGNED)) {
2578 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2579 goto done;
2580 }
2581
11ac75ed 2582 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2583 if (lancer_chip(adapter))
2584 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2585 else
11ac75ed
SP
2586 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2587 vf_cfg->pmac_id, vf + 1);
f9449ab7 2588
11ac75ed
SP
2589 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2590 }
39f1d94d
SP
2591 pci_disable_sriov(adapter->pdev);
2592done:
2593 kfree(adapter->vf_cfg);
2594 adapter->num_vfs = 0;
6d87f5c3
AK
2595}
2596
a54769f5
SP
2597static int be_clear(struct be_adapter *adapter)
2598{
fbc13f01
AK
2599 int i = 1;
2600
191eb756
SP
2601 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2602 cancel_delayed_work_sync(&adapter->work);
2603 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2604 }
2605
11ac75ed 2606 if (sriov_enabled(adapter))
f9449ab7
SP
2607 be_vf_clear(adapter);
2608
fbc13f01
AK
2609 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2610 be_cmd_pmac_del(adapter, adapter->if_handle,
2611 adapter->pmac_id[i], 0);
2612
f9449ab7 2613 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2614
2615 be_mcc_queues_destroy(adapter);
10ef9ab4 2616 be_rx_cqs_destroy(adapter);
a54769f5 2617 be_tx_queues_destroy(adapter);
10ef9ab4 2618 be_evt_queues_destroy(adapter);
a54769f5 2619
abb93951
PR
2620 kfree(adapter->pmac_id);
2621 adapter->pmac_id = NULL;
2622
10ef9ab4 2623 be_msix_disable(adapter);
a54769f5
SP
2624 return 0;
2625}
2626
abb93951
PR
2627static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2628 u32 *cap_flags, u8 domain)
2629{
2630 bool profile_present = false;
2631 int status;
2632
2633 if (lancer_chip(adapter)) {
2634 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2635 if (!status)
2636 profile_present = true;
2637 }
2638
2639 if (!profile_present)
2640 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2641 BE_IF_FLAGS_MULTICAST;
2642}
2643
39f1d94d 2644static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2645{
11ac75ed 2646 struct be_vf_cfg *vf_cfg;
30128031
SP
2647 int vf;
2648
39f1d94d
SP
2649 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2650 GFP_KERNEL);
2651 if (!adapter->vf_cfg)
2652 return -ENOMEM;
2653
11ac75ed
SP
2654 for_all_vfs(adapter, vf_cfg, vf) {
2655 vf_cfg->if_handle = -1;
2656 vf_cfg->pmac_id = -1;
30128031 2657 }
39f1d94d 2658 return 0;
30128031
SP
2659}
2660
f9449ab7
SP
2661static int be_vf_setup(struct be_adapter *adapter)
2662{
11ac75ed 2663 struct be_vf_cfg *vf_cfg;
39f1d94d 2664 struct device *dev = &adapter->pdev->dev;
f9449ab7 2665 u32 cap_flags, en_flags, vf;
f1f3ee1b 2666 u16 def_vlan, lnk_speed;
39f1d94d
SP
2667 int status, enabled_vfs;
2668
2669 enabled_vfs = be_find_vfs(adapter, ENABLED);
2670 if (enabled_vfs) {
2671 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2672 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2673 return 0;
2674 }
f9449ab7 2675
39f1d94d
SP
2676 if (num_vfs > adapter->dev_num_vfs) {
2677 dev_warn(dev, "Device supports %d VFs and not %d\n",
2678 adapter->dev_num_vfs, num_vfs);
2679 num_vfs = adapter->dev_num_vfs;
2680 }
2681
2682 status = pci_enable_sriov(adapter->pdev, num_vfs);
2683 if (!status) {
2684 adapter->num_vfs = num_vfs;
2685 } else {
2686 /* Platform doesn't support SRIOV though device supports it */
2687 dev_warn(dev, "SRIOV enable failed\n");
2688 return 0;
2689 }
2690
2691 status = be_vf_setup_init(adapter);
2692 if (status)
2693 goto err;
30128031 2694
11ac75ed 2695 for_all_vfs(adapter, vf_cfg, vf) {
abb93951
PR
2696 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2697
2698 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2699 BE_IF_FLAGS_BROADCAST |
2700 BE_IF_FLAGS_MULTICAST);
2701
1578e777
PR
2702 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2703 &vf_cfg->if_handle, vf + 1);
f9449ab7
SP
2704 if (status)
2705 goto err;
f9449ab7
SP
2706 }
2707
39f1d94d
SP
2708 if (!enabled_vfs) {
2709 status = be_vf_eth_addr_config(adapter);
2710 if (status)
2711 goto err;
2712 }
f9449ab7 2713
11ac75ed 2714 for_all_vfs(adapter, vf_cfg, vf) {
8a046d3b
VV
2715 lnk_speed = 1000;
2716 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
f9449ab7
SP
2717 if (status)
2718 goto err;
11ac75ed 2719 vf_cfg->tx_rate = lnk_speed * 10;
f1f3ee1b
AK
2720
2721 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2722 vf + 1, vf_cfg->if_handle);
2723 if (status)
2724 goto err;
2725 vf_cfg->def_vid = def_vlan;
f9449ab7
SP
2726 }
2727 return 0;
2728err:
2729 return status;
2730}
2731
30128031
SP
2732static void be_setup_init(struct be_adapter *adapter)
2733{
2734 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2735 adapter->phy.link_speed = -1;
30128031
SP
2736 adapter->if_handle = -1;
2737 adapter->be3_native = false;
2738 adapter->promiscuous = false;
2739 adapter->eq_next_idx = 0;
2740}
2741
1578e777
PR
2742static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2743 bool *active_mac, u32 *pmac_id)
590c391d 2744{
1578e777 2745 int status = 0;
e5e1ee89 2746
1578e777
PR
2747 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2748 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2749 if (!lancer_chip(adapter) && !be_physfn(adapter))
2750 *active_mac = true;
2751 else
2752 *active_mac = false;
e5e1ee89 2753
1578e777
PR
2754 return status;
2755 }
e5e1ee89 2756
1578e777
PR
2757 if (lancer_chip(adapter)) {
2758 status = be_cmd_get_mac_from_list(adapter, mac,
2759 active_mac, pmac_id, 0);
2760 if (*active_mac) {
5ee4979b
SP
2761 status = be_cmd_mac_addr_query(adapter, mac, false,
2762 if_handle, *pmac_id);
1578e777
PR
2763 }
2764 } else if (be_physfn(adapter)) {
2765 /* For BE3, for PF get permanent MAC */
5ee4979b 2766 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2767 *active_mac = false;
e5e1ee89 2768 } else {
1578e777 2769 /* For BE3, for VF get the soft MAC assigned by the PF */
5ee4979b 2770 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2771 if_handle, 0);
2772 *active_mac = true;
e5e1ee89 2773 }
590c391d
PR
2774 return status;
2775}
2776
abb93951
PR
2777static void be_get_resources(struct be_adapter *adapter)
2778{
2779 int status;
2780 bool profile_present = false;
2781
2782 if (lancer_chip(adapter)) {
2783 status = be_cmd_get_func_config(adapter);
2784
2785 if (!status)
2786 profile_present = true;
2787 }
2788
2789 if (profile_present) {
2790 /* Sanity fixes for Lancer */
2791 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2792 BE_UC_PMAC_COUNT);
2793 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2794 BE_NUM_VLANS_SUPPORTED);
2795 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2796 BE_MAX_MC);
2797 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2798 MAX_TX_QS);
2799 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2800 BE3_MAX_RSS_QS);
2801 adapter->max_event_queues = min_t(u16,
2802 adapter->max_event_queues,
2803 BE3_MAX_RSS_QS);
2804
2805 if (adapter->max_rss_queues &&
2806 adapter->max_rss_queues == adapter->max_rx_queues)
2807 adapter->max_rss_queues -= 1;
2808
2809 if (adapter->max_event_queues < adapter->max_rss_queues)
2810 adapter->max_rss_queues = adapter->max_event_queues;
2811
2812 } else {
2813 if (be_physfn(adapter))
2814 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2815 else
2816 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2817
2818 if (adapter->function_mode & FLEX10_MODE)
2819 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2820 else
2821 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2822
2823 adapter->max_mcast_mac = BE_MAX_MC;
2824 adapter->max_tx_queues = MAX_TX_QS;
2825 adapter->max_rss_queues = (adapter->be3_native) ?
2826 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2827 adapter->max_event_queues = BE3_MAX_RSS_QS;
2828
2829 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2830 BE_IF_FLAGS_BROADCAST |
2831 BE_IF_FLAGS_MULTICAST |
2832 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2833 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2834 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2835 BE_IF_FLAGS_PROMISCUOUS;
2836
2837 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2838 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2839 }
2840}
2841
39f1d94d
SP
2842/* Routine to query per function resource limits */
2843static int be_get_config(struct be_adapter *adapter)
2844{
abb93951 2845 int pos, status;
39f1d94d
SP
2846 u16 dev_num_vfs;
2847
abb93951
PR
2848 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2849 &adapter->function_mode,
2850 &adapter->function_caps);
2851 if (status)
2852 goto err;
2853
2854 be_get_resources(adapter);
2855
2856 /* primary mac needs 1 pmac entry */
2857 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2858 sizeof(u32), GFP_KERNEL);
2859 if (!adapter->pmac_id) {
2860 status = -ENOMEM;
2861 goto err;
2862 }
2863
39f1d94d
SP
2864 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2865 if (pos) {
2866 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2867 &dev_num_vfs);
7c5a5242
VV
2868 if (!lancer_chip(adapter))
2869 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
39f1d94d
SP
2870 adapter->dev_num_vfs = dev_num_vfs;
2871 }
abb93951
PR
2872err:
2873 return status;
39f1d94d
SP
2874}
2875
5fb379ee
SP
2876static int be_setup(struct be_adapter *adapter)
2877{
39f1d94d 2878 struct device *dev = &adapter->pdev->dev;
abb93951 2879 u32 en_flags;
a54769f5 2880 u32 tx_fc, rx_fc;
10ef9ab4 2881 int status;
ba343c77 2882 u8 mac[ETH_ALEN];
1578e777 2883 bool active_mac;
ba343c77 2884
30128031 2885 be_setup_init(adapter);
6b7c5b94 2886
abb93951
PR
2887 if (!lancer_chip(adapter))
2888 be_cmd_req_native_mode(adapter);
39f1d94d 2889
abb93951
PR
2890 status = be_get_config(adapter);
2891 if (status)
2892 goto err;
73d540f2 2893
10ef9ab4
SP
2894 be_msix_enable(adapter);
2895
2896 status = be_evt_queues_create(adapter);
2897 if (status)
a54769f5 2898 goto err;
6b7c5b94 2899
10ef9ab4
SP
2900 status = be_tx_cqs_create(adapter);
2901 if (status)
2902 goto err;
2903
2904 status = be_rx_cqs_create(adapter);
2905 if (status)
a54769f5 2906 goto err;
6b7c5b94 2907
f9449ab7 2908 status = be_mcc_queues_create(adapter);
10ef9ab4 2909 if (status)
a54769f5 2910 goto err;
6b7c5b94 2911
f9449ab7
SP
2912 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2913 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 2914
abb93951 2915 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 2916 en_flags |= BE_IF_FLAGS_RSS;
1578e777 2917
abb93951 2918 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 2919
abb93951 2920 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 2921 &adapter->if_handle, 0);
5fb379ee 2922 if (status != 0)
a54769f5 2923 goto err;
6b7c5b94 2924
1578e777
PR
2925 memset(mac, 0, ETH_ALEN);
2926 active_mac = false;
2927 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2928 &active_mac, &adapter->pmac_id[0]);
2929 if (status != 0)
2930 goto err;
2931
2932 if (!active_mac) {
2933 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2934 &adapter->pmac_id[0], 0);
2935 if (status != 0)
2936 goto err;
2937 }
2938
2939 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2940 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2941 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 2942 }
0dffc83e 2943
10ef9ab4
SP
2944 status = be_tx_qs_create(adapter);
2945 if (status)
2946 goto err;
2947
04b71175 2948 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 2949
1d1e9a46 2950 if (adapter->vlans_added)
10329df8 2951 be_vid_config(adapter);
7ab8b0b4 2952
a54769f5 2953 be_set_rx_mode(adapter->netdev);
5fb379ee 2954
ddc3f5cb 2955 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 2956
ddc3f5cb
AK
2957 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2958 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 2959 adapter->rx_fc);
2dc1deb6 2960
39f1d94d
SP
2961 if (be_physfn(adapter) && num_vfs) {
2962 if (adapter->dev_num_vfs)
2963 be_vf_setup(adapter);
2964 else
2965 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
2966 }
2967
42f11cf2
AK
2968 be_cmd_get_phy_info(adapter);
2969 if (be_pause_supported(adapter))
2970 adapter->phy.fc_autoneg = 1;
2971
191eb756
SP
2972 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2973 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 2974 return 0;
a54769f5
SP
2975err:
2976 be_clear(adapter);
2977 return status;
2978}
6b7c5b94 2979
66268739
IV
2980#ifdef CONFIG_NET_POLL_CONTROLLER
2981static void be_netpoll(struct net_device *netdev)
2982{
2983 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2984 struct be_eq_obj *eqo;
66268739
IV
2985 int i;
2986
10ef9ab4
SP
2987 for_all_evt_queues(adapter, eqo, i)
2988 event_handle(eqo);
2989
2990 return;
66268739
IV
2991}
2992#endif
2993
84517482 2994#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
2995char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2996
fa9a6fed 2997static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
2998 const u8 *p, u32 img_start, int image_size,
2999 int hdr_size)
fa9a6fed
SB
3000{
3001 u32 crc_offset;
3002 u8 flashed_crc[4];
3003 int status;
3f0d4560
AK
3004
3005 crc_offset = hdr_size + img_start + image_size - 4;
3006
fa9a6fed 3007 p += crc_offset;
3f0d4560
AK
3008
3009 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3010 (image_size - 4));
fa9a6fed
SB
3011 if (status) {
3012 dev_err(&adapter->pdev->dev,
3013 "could not get crc from flash, not flashing redboot\n");
3014 return false;
3015 }
3016
3017 /* update redboot only if the CRC does not match */
3018 if (!memcmp(flashed_crc, p, 4))
3019 return false;
3020 else
3021 return true;
fa9a6fed
SB
3022}
3023
306f1348
SP
3024static bool phy_flashing_required(struct be_adapter *adapter)
3025{
42f11cf2
AK
3026 return (adapter->phy.phy_type == TN_8022 &&
3027 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3028}
3029
c165541e
PR
3030static bool is_comp_in_ufi(struct be_adapter *adapter,
3031 struct flash_section_info *fsec, int type)
3032{
3033 int i = 0, img_type = 0;
3034 struct flash_section_info_g2 *fsec_g2 = NULL;
3035
3036 if (adapter->generation != BE_GEN3)
3037 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3038
3039 for (i = 0; i < MAX_FLASH_COMP; i++) {
3040 if (fsec_g2)
3041 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3042 else
3043 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3044
3045 if (img_type == type)
3046 return true;
3047 }
3048 return false;
3049
3050}
3051
3052struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3053 int header_size,
3054 const struct firmware *fw)
3055{
3056 struct flash_section_info *fsec = NULL;
3057 const u8 *p = fw->data;
3058
3059 p += header_size;
3060 while (p < (fw->data + fw->size)) {
3061 fsec = (struct flash_section_info *)p;
3062 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3063 return fsec;
3064 p += 32;
3065 }
3066 return NULL;
3067}
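/* get_fsec_info() above scans the UFI blob in fixed 32-byte steps,
 * memcmp-ing each position against the flash-directory cookie until a
 * section header is found. A standalone sketch of such a stepped scan;
 * the 4-byte cookie value here is assumed, not the real one:
 */
#include <string.h>
#include <stddef.h>

static const unsigned char *find_cookie(const unsigned char *buf, size_t len)
{
        static const unsigned char cookie[4] = "SEC";   /* assumed marker */
        const unsigned char *p;

        for (p = buf; p + sizeof(cookie) <= buf + len; p += 32)
                if (!memcmp(p, cookie, sizeof(cookie)))
                        return p;       /* header starts at this position */
        return NULL;
}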
3068
3f0d4560 3069static int be_flash_data(struct be_adapter *adapter,
c165541e
PR
3070 const struct firmware *fw,
3071 struct be_dma_mem *flash_cmd,
3072 int num_of_images)
3f0d4560 3073
84517482 3074{
3f0d4560 3075 int status = 0, i, filehdr_size = 0;
c165541e 3076 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3f0d4560 3077 u32 total_bytes = 0, flash_op;
84517482
AK
3078 int num_bytes;
3079 const u8 *p = fw->data;
3080 struct be_cmd_write_flashrom *req = flash_cmd->va;
215faf9c 3081 const struct flash_comp *pflashcomp;
c165541e
PR
3082 int num_comp, hdr_size;
3083 struct flash_section_info *fsec = NULL;
3084
3085 struct flash_comp gen3_flash_types[] = {
3086 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3087 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3088 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3089 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3090 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3091 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3092 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3093 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3094 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3095 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3096 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3097 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3098 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3099 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3100 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3101 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3102 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3103 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3104 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3105 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3106 };
c165541e
PR
3107
3108 struct flash_comp gen2_flash_types[] = {
3109 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3110 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3111 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3112 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3113 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3114 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3115 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3116 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3117 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3118 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3119 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3120 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3121 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3122 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3123 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3124 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3125 };
3126
3127 if (adapter->generation == BE_GEN3) {
3128 pflashcomp = gen3_flash_types;
3129 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3130 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3131 } else {
3132 pflashcomp = gen2_flash_types;
3133 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3134 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3135 }
c165541e
PR
3136 /* Get flash section info*/
3137 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3138 if (!fsec) {
3139 dev_err(&adapter->pdev->dev,
3140 "Invalid Cookie. UFI corrupted ?\n");
3141 return -1;
3142 }
9fe96934 3143 for (i = 0; i < num_comp; i++) {
c165541e 3144 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3145 continue;
c165541e
PR
3146
3147 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3148 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3149 continue;
3150
3151 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
306f1348
SP
3152 if (!phy_flashing_required(adapter))
3153 continue;
3154 }
c165541e
PR
3155
3156 hdr_size = filehdr_size +
3157 (num_of_images * sizeof(struct image_hdr));
3158
3159 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3160 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3161 pflashcomp[i].size, hdr_size)))
3f0d4560 3162 continue;
c165541e
PR
3163
3164 /* Flash the component */
3f0d4560 3165 p = fw->data;
c165541e 3166 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3167 if (p + pflashcomp[i].size > fw->data + fw->size)
3168 return -1;
3169 total_bytes = pflashcomp[i].size;
3f0d4560
AK
3170 while (total_bytes) {
3171 if (total_bytes > 32*1024)
3172 num_bytes = 32*1024;
3173 else
3174 num_bytes = total_bytes;
3175 total_bytes -= num_bytes;
306f1348 3176 if (!total_bytes) {
c165541e 3177 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
306f1348
SP
3178 flash_op = FLASHROM_OPER_PHY_FLASH;
3179 else
3180 flash_op = FLASHROM_OPER_FLASH;
3181 } else {
c165541e 3182 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
306f1348
SP
3183 flash_op = FLASHROM_OPER_PHY_SAVE;
3184 else
3185 flash_op = FLASHROM_OPER_SAVE;
3186 }
3f0d4560
AK
3187 memcpy(req->params.data_buf, p, num_bytes);
3188 p += num_bytes;
3189 status = be_cmd_write_flashrom(adapter, flash_cmd,
3190 pflashcomp[i].optype, flash_op, num_bytes);
3191 if (status) {
306f1348
SP
3192 if ((status == ILLEGAL_IOCTL_REQ) &&
3193 (pflashcomp[i].optype ==
c165541e 3194 OPTYPE_PHY_FW))
306f1348 3195 break;
3f0d4560
AK
3196 dev_err(&adapter->pdev->dev,
3197 "cmd to write to flash rom failed.\n");
3198 return -1;
3199 }
84517482 3200 }
84517482 3201 }
84517482
AK
3202 return 0;
3203}
3204
3f0d4560
AK
3205static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3206{
3207 if (fhdr == NULL)
3208 return 0;
3209 if (fhdr->build[0] == '3')
3210 return BE_GEN3;
3211 else if (fhdr->build[0] == '2')
3212 return BE_GEN2;
3213 else
3214 return 0;
3215}
3216
f67ef7ba
PR
3217static int lancer_wait_idle(struct be_adapter *adapter)
3218{
3219#define SLIPORT_IDLE_TIMEOUT 30
3220 u32 reg_val;
3221 int status = 0, i;
3222
3223 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3224 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3225 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3226 break;
3227
3228 ssleep(1);
3229 }
3230
3231 if (i == SLIPORT_IDLE_TIMEOUT)
3232 status = -1;
3233
3234 return status;
3235}
3236
3237static int lancer_fw_reset(struct be_adapter *adapter)
3238{
3239 int status = 0;
3240
3241 status = lancer_wait_idle(adapter);
3242 if (status)
3243 return status;
3244
3245 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3246 PHYSDEV_CONTROL_OFFSET);
3247
3248 return status;
3249}
3250
485bf569
SN
3251static int lancer_fw_download(struct be_adapter *adapter,
3252 const struct firmware *fw)
84517482 3253{
485bf569
SN
3254#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3255#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3256 struct be_dma_mem flash_cmd;
485bf569
SN
3257 const u8 *data_ptr = NULL;
3258 u8 *dest_image_ptr = NULL;
3259 size_t image_size = 0;
3260 u32 chunk_size = 0;
3261 u32 data_written = 0;
3262 u32 offset = 0;
3263 int status = 0;
3264 u8 add_status = 0;
f67ef7ba 3265 u8 change_status;
84517482 3266
485bf569 3267 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3268 dev_err(&adapter->pdev->dev,
485bf569
SN
3269 "FW Image not properly aligned. "
3270 "Length must be 4 byte aligned.\n");
3271 status = -EINVAL;
3272 goto lancer_fw_exit;
d9efd2af
SB
3273 }
3274
485bf569
SN
3275 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3276 + LANCER_FW_DOWNLOAD_CHUNK;
3277 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3278 &flash_cmd.dma, GFP_KERNEL);
3279 if (!flash_cmd.va) {
3280 status = -ENOMEM;
3281 dev_err(&adapter->pdev->dev,
3282 "Memory allocation failure while flashing\n");
3283 goto lancer_fw_exit;
3284 }
84517482 3285
485bf569
SN
3286 dest_image_ptr = flash_cmd.va +
3287 sizeof(struct lancer_cmd_req_write_object);
3288 image_size = fw->size;
3289 data_ptr = fw->data;
3290
3291 while (image_size) {
3292 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3293
3294 /* Copy the image chunk content. */
3295 memcpy(dest_image_ptr, data_ptr, chunk_size);
3296
3297 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3298 chunk_size, offset,
3299 LANCER_FW_DOWNLOAD_LOCATION,
3300 &data_written, &change_status,
3301 &add_status);
485bf569
SN
3302 if (status)
3303 break;
3304
3305 offset += data_written;
3306 data_ptr += data_written;
3307 image_size -= data_written;
3308 }
3309
3310 if (!status) {
3311 /* Commit the FW written */
3312 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3313 0, offset,
3314 LANCER_FW_DOWNLOAD_LOCATION,
3315 &data_written, &change_status,
3316 &add_status);
485bf569
SN
3317 }
3318
3319 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3320 flash_cmd.dma);
3321 if (status) {
3322 dev_err(&adapter->pdev->dev,
3323 "Firmware load error. "
3324 "Status code: 0x%x Additional Status: 0x%x\n",
3325 status, add_status);
3326 goto lancer_fw_exit;
3327 }
3328
f67ef7ba
PR
3329 if (change_status == LANCER_FW_RESET_NEEDED) {
3330 status = lancer_fw_reset(adapter);
3331 if (status) {
3332 dev_err(&adapter->pdev->dev,
3333 "Adapter busy for FW reset.\n"
3334 "New FW will not be active.\n");
3335 goto lancer_fw_exit;
3336 }
3337 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3338 dev_err(&adapter->pdev->dev,
3339 "System reboot required for new FW"
3340 " to be active\n");
3341 }
3342
485bf569
SN
3343 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3344lancer_fw_exit:
3345 return status;
3346}
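/* lancer_fw_download() streams the image in 32KB chunks and, crucially,
 * advances by the number of bytes the firmware reports as written rather
 * than by the chunk size requested. A sketch of that loop shape;
 * write_chunk() is a hypothetical transport that may write short:
 */
#include <stddef.h>
#include <stdint.h>

static long write_chunk(const uint8_t *p, size_t len, size_t offset);

static int download(const uint8_t *image, size_t size)
{
        size_t offset = 0;

        while (size) {
                size_t chunk = size < 32768 ? size : 32768;
                long written = write_chunk(image, chunk, offset);

                if (written <= 0)
                        return -1;      /* transport error or no progress */
                offset += written;      /* advance by what actually landed */
                image += written;
                size -= written;
        }
        return 0;
}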
3347
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *)(fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

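/* Entry point for firmware flashing (invoked via ethtool). Fetches the
 * image with request_firmware() and dispatches to the Lancer or BE2/BE3
 * download path.
 */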
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

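/* Advertise the offloads the hardware supports (checksum, TSO, VLAN
 * acceleration, RX hashing on multi-queue setups) and attach the netdev,
 * ethtool and NAPI callbacks.
 */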
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

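/* Map the BARs this function needs: on Lancer the doorbells live in BAR 0;
 * on BE2/BE3 the PF maps the CSR area from BAR 2 and doorbells from BAR 4,
 * while gen3 VFs take their doorbells from BAR 0.
 */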
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

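/* Set up the controller-access primitives: the mailbox used for posting
 * commands must be 16-byte aligned, so allocate with 16 bytes of slack and
 * align the virtual and DMA addresses by hand.
 */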
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

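/* Query the extended FAT capabilities and pull out the UART trace level;
 * the result seeds the driver's initial msg_enable setting.
 */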
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

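/* Identify the controller generation from the PCI device id; for Lancer
 * and Skyhawk ids the SLI_INTF register supplies the interface type and
 * SLI family, and also tells us whether this function is a VF.
 */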
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

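/* Recover a Lancer function after a SLIPORT error: wait for the firmware
 * to report ready, then tear down and rebuild all rings and interfaces,
 * clearing the recorded error state on the way.
 */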
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

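/* Runs every second to poll for hardware errors. On Lancer chips a
 * detected error triggers in-place recovery with the netdev detached;
 * EEH-driven recovery is left to the PCI error handlers below.
 */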
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

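/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh stats and die temperature, replenish RX queues
 * that ran out of buffers, and adapt the EQ interrupt delays.
 */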
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

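/* Bring a newly discovered function up: map its BARs, sync with the
 * firmware's ready state, reset the function unless VFs are still enabled,
 * then create rings and interfaces via be_setup() and register the netdev.
 */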
static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

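/* EEH callbacks: error_detected quiesces the function, slot_reset
 * re-enables the device and waits for firmware readiness, and resume
 * rebuilds the interface once the slot is healthy again.
 */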
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);